From 618a36502354469b8196da400596f3fefaacf8ea Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 30 Oct 2021 10:14:37 -0400 Subject: [PATCH 001/632] init rel_1_4 branch Change-Id: I982c86c3f3cc4b1ed1956a9eb92cf4612535ae45 --- .gitreview | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitreview b/.gitreview index 01d8b1770f7..20b0caa53e1 100644 --- a/.gitreview +++ b/.gitreview @@ -1,4 +1,4 @@ [gerrit] host=gerrit.sqlalchemy.org project=sqlalchemy/sqlalchemy -defaultbranch=main +defaultbranch=rel_1_4 From e760e0e1fe229766734abde6b438d75320bdac48 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 1 Nov 2021 12:06:32 -0400 Subject: [PATCH 002/632] Revise "literal parameters" FAQ section based on feedback in #7271, the emphasis on TypeDecorator as a solution to this problem is not very practical. illustrate a series of quick recipes that are useful for debugging purposes to print out a repr() or simple stringify of a parameter without the need to construct custom dialects or types. 
Change-Id: I788ce1b5ea01d88dd0a22d03d06f35aabff5e5c8 (cherry picked from commit b2a28c556f10ee31605c978173f0cce62175ad61) --- doc/build/faq/sqlexpressions.rst | 254 ++++++++++++++++++++++++++----- lib/sqlalchemy/sql/compiler.py | 24 ++- 2 files changed, 241 insertions(+), 37 deletions(-) diff --git a/doc/build/faq/sqlexpressions.rst b/doc/build/faq/sqlexpressions.rst index 93653a10ced..cc629f4cc07 100644 --- a/doc/build/faq/sqlexpressions.rst +++ b/doc/build/faq/sqlexpressions.rst @@ -61,6 +61,13 @@ use a PostgreSQL dialect:: from sqlalchemy.dialects import postgresql print(statement.compile(dialect=postgresql.dialect())) +Note that any dialect can be assembled using :func:`_sa.create_engine` itself +with a dummy URL and then accessing the :attr:`_engine.Engine.dialect` attribute, +such as if we wanted a dialect object for psycopg2:: + + e = create_engine("postgresql+psycopg2://") + psycopg2_dialect = e.dialect + When given an ORM :class:`~.orm.query.Query` object, in order to get at the :meth:`_expression.ClauseElement.compile` method we only need access the :attr:`~.orm.query.Query.statement` @@ -72,7 +79,7 @@ accessor first:: Rendering Bound Parameters Inline ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. warning:: **Never** use this technique with string content received from +.. warning:: **Never** use these techniques with string content received from untrusted input, such as from web forms or other user-input applications. SQLAlchemy's facilities to coerce Python values into direct SQL string values are **not secure against untrusted input and do not validate the type @@ -98,44 +105,185 @@ flag, passed to ``compile_kwargs``:: # **do not use** with untrusted input!!! print(s.compile(compile_kwargs={"literal_binds": True})) -The above approach has the caveats that it is only supported for basic -types, such as ints and strings, and furthermore if a :func:`.bindparam` -without a pre-set value is used directly, it won't be able to -stringify that either. 
+ # to render for a specific dialect + print(s.compile(dialect=dialect, compile_kwargs={"literal_binds": True})) -This functionality is provided mainly for -logging or debugging purposes, where having the raw sql string of a query -may prove useful. Note that the ``dialect`` parameter should also -passed to the :meth:`_expression.ClauseElement.compile` method to render -the query that will be sent to the database. + # or if you have an Engine, pass as first argument + print(s.compile(some_engine, compile_kwargs={"literal_binds": True})) -To support inline literal rendering for types not supported, implement -a :class:`.TypeDecorator` for the target type which includes a -:meth:`.TypeDecorator.process_literal_param` method:: +This functionality is provided mainly for logging or debugging purposes, where +having the raw sql string of a query may prove useful. - from sqlalchemy import TypeDecorator, Integer +The above approach has the caveats that it is only supported for basic types, +such as ints and strings, and furthermore if a :func:`.bindparam` without a +pre-set value is used directly, it won't be able to stringify that either. +Methods of stringifying all parameters unconditionally are detailed below. +.. tip:: - class MyFancyType(TypeDecorator): - impl = Integer + The reason SQLAlchemy does not support full stringification of all + datatypes is threefold: - def process_literal_param(self, value, dialect): - return "my_fancy_formatting(%s)" % value + 1. This is a functionality that is already supported by the DBAPI in use + when the DBAPI is used normally. The SQLAlchemy project cannot be + tasked with duplicating this functionality for every datatype for + all backends, as this is redundant work which also incurs significant + testing and ongoing support overhead. + + 2. Stringifying with bound parameters inlined for specific databases + suggests a usage that is actually passing these fully stringified + statements onto the database for execution. 
This is unnecessary and + insecure, and SQLAlchemy does not want to encourage this use in any + way. + + 3. The area of rendering literal values is the most likely area for + security issues to be reported. SQLAlchemy tries to keep the area of + safe parameter stringification an issue for the DBAPI drivers as much + as possible where the specifics for each DBAPI can be handled + appropriately and securely. + +As SQLAlchemy intentionally does not support full stringification of literal +values, techniques to do so within specific debugging scenarios include the +following. As an example, we will use the PostgreSQL :class:`_postgresql.UUID` +datatype:: + + import uuid + + from sqlalchemy import Column + from sqlalchemy import create_engine + from sqlalchemy import Integer + from sqlalchemy import select + from sqlalchemy.dialects.postgresql import UUID + from sqlalchemy.orm import declarative_base + + + Base = declarative_base() + + class A(Base): + __tablename__ = 'a' + + id = Column(Integer, primary_key=True) + data = Column(UUID) + + stmt = select(A).where(A.data == uuid.uuid4()) + +Given the above model and statement which will compare a column to a single +UUID value, options for stringifying this statement with inline values +include: + +* Some DBAPIs such as psycopg2 support helper functions like + `mogrify() `_ which + provide access to their literal-rendering functionality. 
To use such + features, render the SQL string without using ``literal_binds`` and pass + the parameters separately via the :attr:`.SQLCompiler.params` accessor:: + + e = create_engine("postgresql+psycopg2://scott:tiger@localhost/test") + + with e.connect() as conn: + cursor = conn.connection.cursor() + compiled = stmt.compile(e) + + print(cursor.mogrify(str(compiled), compiled.params)) + + The above code will produce psycopg2's raw bytestring:: + + b"SELECT a.id, a.data \nFROM a \nWHERE a.data = 'a511b0fc-76da-4c47-a4b4-716a8189b7ac'::uuid" + +* Render the :attr:`.SQLCompiler.params` directly into the statement, using + the appropriate `paramstyle `_ + of the target DBAPI. For example, the psycopg2 DBAPI uses the named ``pyformat`` + style. The meaning of ``render_postcompile`` will be discussed in the next + section. **WARNING this is NOT secure, do NOT use untrusted input**:: + + e = create_engine("postgresql+psycopg2://") + + # will use pyformat style, i.e. %(paramname)s for param + compiled = stmt.compile(e, compile_kwargs={"render_postcompile": True}) + + print(str(compiled) % compiled.params) + + This will produce a non-working string, that nonetheless is suitable for + debugging:: + + SELECT a.id, a.data + FROM a + WHERE a.data = 9eec1209-50b4-4253-b74b-f82461ed80c1 - from sqlalchemy import Table, Column, MetaData + Another example using a positional paramstyle such as ``qmark``, we can render + our above statement in terms of SQLite by also using the + :attr:`.SQLCompiler.positiontup` collection in conjunction with + :attr:`.SQLCompiler.params`, in order to retrieve the parameters in + their positional order for the statement as compiled:: - tab = Table('mytable', MetaData(), Column('x', MyFancyType())) + import re + e = create_engine("sqlite+pysqlite://") - stmt = tab.select().where(tab.c.x > 5) - print(stmt.compile(compile_kwargs={"literal_binds": True})) + # will use qmark style, i.e. ? 
for param + compiled = stmt.compile(e, compile_kwargs={"render_postcompile": True}) -producing output like:: + # params in positional order + params = (repr(compiled.params[name]) for name in compiled.positiontup) - SELECT mytable.x - FROM mytable - WHERE mytable.x > my_fancy_formatting(5) + print(re.sub(r'\?', lambda m: next(params), str(compiled))) + The above snippet prints:: + SELECT a.id, a.data + FROM a + WHERE a.data = UUID('1bd70375-db17-4d8c-94f1-fc2ef3aada26') + +* Use the :ref:`sqlalchemy.ext.compiler_toplevel` extension to render + :class:`_sql.BindParameter` objects in a custom way when a user-defined + flag is present. This flag is sent through the ``compile_kwargs`` + dictionary like any other flag:: + + from sqlalchemy.ext.compiler import compiles + from sqlalchemy.sql.expression import BindParameter + + @compiles(BindParameter) + def _render_literal_bindparam(element, compiler, use_my_literal_recipe=False, **kw): + if not use_my_literal_recipe: + # use normal bindparam processing + return compiler.visit_bindparam(element, **kw) + + # if use_my_literal_recipe was passed to compiler_kwargs, + # render the value directly + return repr(element.value) + + e = create_engine("postgresql+psycopg2://") + print(stmt.compile(e, compile_kwargs={"use_my_literal_recipe": True})) + + The above recipe will print:: + + SELECT a.id, a.data + FROM a + WHERE a.data = UUID('47b154cd-36b2-42ae-9718-888629ab9857') + +* For type-specific stringification that's built into a model or a statement, the + :class:`_types.TypeDecorator` class may be used to provide custom stringification + of any datatype using the :meth:`.TypeDecorator.process_literal_param` method:: + + from sqlalchemy import TypeDecorator + + class UUIDStringify(TypeDecorator): + impl = UUID + + def process_literal_param(self, value, dialect): + return repr(value) + + The above datatype needs to be used either explicitly within the model + or locally within the statement using :func:`_sql.type_coerce`, such 
as :: + + from sqlalchemy import type_coerce + stmt = select(A).where(type_coerce(A.data, UUIDStringify) == uuid.uuid4()) + + print(stmt.compile(e, compile_kwargs={"literal_binds": True})) + + Again printing the same form:: + + SELECT a.id, a.data + FROM a + WHERE a.data = UUID('47b154cd-36b2-42ae-9718-888629ab9857') Rendering "POSTCOMPILE" Parameters as Bound Parameters ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -149,23 +297,57 @@ known values are passed. "Expanding" parameters are used for string can be safely cached independently of the actual lists of values being passed to a particular invocation of :meth:`_sql.ColumnOperators.in_`:: - >>> from sqlalchemy import column - >>> expr = column('x').in_([1, 2, 3]) - >>> print(expr) - x IN ([POSTCOMPILE_x_1]) + >>> stmt = select(A).where(A.id.in_[1, 2, 3]) To render the IN clause with real bound parameter symbols, use the ``render_postcompile=True`` flag with :meth:`_sql.ClauseElement.compile`:: - >>> print(expr.compile(compile_kwargs={"render_postcompile": True})) - x IN (:x_1_1, :x_1_2, :x_1_3) + >>> e = create_engine("postgresql+psycopg2://") + >>> print(stmt.compile(e, compile_kwargs={"render_postcompile": True})) + SELECT a.id, a.data + FROM a + WHERE a.id IN (%(id_1_1)s, %(id_1_2)s, %(id_1_3)s) + +The ``literal_binds`` flag, described in the previous section regarding +rendering of bound parameters, automatically sets ``render_postcompile`` to +True, so for a statement with simple ints/strings, these can be stringified +directly:: + + # render_postcompile is implied by literal_binds + >>> print(stmt.compile(e, compile_kwargs={"literal_binds": True})) + SELECT a.id, a.data + FROM a + WHERE a.id IN (1, 2, 3) + +The :attr:`.SQLCompiler.params` and :attr:`.SQLCompiler.positiontup` are +also compatible with ``render_postcompile``, so that +the previous recipes for rendering inline bound parameters will work here +in the same way, such as SQLite's positional form:: + + >>> u1, u2, u3 = uuid.uuid4(), 
uuid.uuid4(), uuid.uuid4() + >>> stmt = select(A).where(A.data.in_([u1, u2, u3])) + + >>> import re + >>> e = create_engine("sqlite+pysqlite://") + >>> compiled = stmt.compile(e, compile_kwargs={"render_postcompile": True}) + >>> params = (repr(compiled.params[name]) for name in compiled.positiontup) + >>> print(re.sub(r'\?', lambda m: next(params), str(compiled))) + SELECT a.id, a.data + FROM a + WHERE a.data IN (UUID('aa1944d6-9a5a-45d5-b8da-0ba1ef0a4f38'), UUID('a81920e6-15e2-4392-8a3c-d775ffa9ccd2'), UUID('b5574cdb-ff9b-49a3-be52-dbc89f087bfa')) + +.. warning:: + + Remember, all of the above code recipes are **only to be used when**: + + 1. the use is **debugging purposes only** -As described in the previous section, the ``literal_binds`` flag works here -by automatically setting ``render_postcompile`` to True:: + 2. the string **is not to be passed to a live production database** - >>> print(expr.compile(compile_kwargs={"literal_binds": True})) - x IN (1, 2, 3) + 3. only with **local, trusted input** + The above recipes for stringification of parameters are **not secure in + any way and should never be used against production databases**. .. _faq_sql_expression_percent_signs: diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 0cd568fcc64..266452851bc 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -670,6 +670,21 @@ class SQLCompiler(Compiled): """ + positiontup = None + """for a compiled construct that uses a positional paramstyle, will be + a sequence of strings, indicating the names of bound parameters in order. + + This is used in order to render bound parameters in their correct order, + and is combined with the :attr:`_sql.Compiled.params` dictionary to + render parameters. + + .. seealso:: + + :ref:`faq_sql_expression_string` - includes a usage example for + debugging use cases. 
+ + """ + inline = False def __init__( @@ -1091,7 +1106,14 @@ def _lookup_type(typ): @property def params(self): """Return the bind param dictionary embedded into this - compiled object, for those values that are present.""" + compiled object, for those values that are present. + + .. seealso:: + + :ref:`faq_sql_expression_string` - includes a usage example for + debugging use cases. + + """ return self.construct_params(_check=False) def _process_parameters_for_postcompile( From 370624ce0eab6439352eaf502a222a7bf415dc14 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 1 Nov 2021 16:36:51 -0400 Subject: [PATCH 003/632] use full context manager flow for future.Engine.begin() Fixed issue in future :class:`_future.Engine` where calling upon :meth:`_future.Engine.begin` and entering the context manager would not close the connection if the actual BEGIN operation failed for some reason, such as an event handler raising an exception; this use case failed to be tested for the future version of the engine. Note that the "future" context managers which handle ``begin()`` blocks in Core and ORM don't actually run the "BEGIN" operation until the context managers are actually entered. This is different from the legacy version which runs the "BEGIN" operation up front. Fixes: #7272 Change-Id: I9667ac0861a9e007c4b3dfcf0fcf0829038a8711 (cherry picked from commit c4abf5a44249fa42ae9c5d5c3035b8258c6d92b6) --- doc/build/changelog/unreleased_14/7272.rst | 14 ++++++++ lib/sqlalchemy/future/engine.py | 21 +++-------- test/engine/test_execute.py | 41 +++++++++++++++++++--- 3 files changed, 55 insertions(+), 21 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7272.rst diff --git a/doc/build/changelog/unreleased_14/7272.rst b/doc/build/changelog/unreleased_14/7272.rst new file mode 100644 index 00000000000..a38aacdaa8e --- /dev/null +++ b/doc/build/changelog/unreleased_14/7272.rst @@ -0,0 +1,14 @@ +.. 
change:: + :tags: bug, engine + :tickets: 7272 + :versions: 2.0.0b1 + + Fixed issue in future :class:`_future.Engine` where calling upon + :meth:`_future.Engine.begin` and entering the context manager would not + close the connection if the actual BEGIN operation failed for some reason, + such as an event handler raising an exception; this use case failed to be + tested for the future version of the engine. Note that the "future" context + managers which handle ``begin()`` blocks in Core and ORM don't actually run + the "BEGIN" operation until the context managers are actually entered. This + is different from the legacy version which runs the "BEGIN" operation up + front. diff --git a/lib/sqlalchemy/future/engine.py b/lib/sqlalchemy/future/engine.py index ab890ca4f4c..3235529f736 100644 --- a/lib/sqlalchemy/future/engine.py +++ b/lib/sqlalchemy/future/engine.py @@ -353,21 +353,7 @@ def _future_facade(self, legacy_engine): execution_options=legacy_engine._execution_options, ) - class _trans_ctx(object): - def __init__(self, conn): - self.conn = conn - - def __enter__(self): - self.transaction = self.conn.begin() - self.transaction.__enter__() - return self.conn - - def __exit__(self, type_, value, traceback): - try: - self.transaction.__exit__(type_, value, traceback) - finally: - self.conn.close() - + @util.contextmanager def begin(self): """Return a :class:`_future.Connection` object with a transaction begun. @@ -390,8 +376,9 @@ def begin(self): :meth:`_future.Connection.begin` """ - conn = self.connect() - return self._trans_ctx(conn) + with self.connect() as conn: + with conn.begin(): + yield conn def connect(self): """Return a new :class:`_future.Connection` object. 
diff --git a/test/engine/test_execute.py b/test/engine/test_execute.py index 4a14cbcca0c..22731f5d082 100644 --- a/test/engine/test_execute.py +++ b/test/engine/test_execute.py @@ -813,16 +813,42 @@ def test_transaction_engine_ctx_commit(self): testing.run_as_contextmanager(ctx, fn, 5, value=8) self._assert_fn(5, value=8) - def test_transaction_engine_ctx_begin_fails(self): + def test_transaction_engine_ctx_begin_fails_dont_enter_enter(self): + """test #7272""" engine = engines.testing_engine() mock_connection = Mock( return_value=Mock(begin=Mock(side_effect=Exception("boom"))) ) - engine._connection_cls = mock_connection - assert_raises(Exception, engine.begin) + with mock.patch.object(engine, "_connection_cls", mock_connection): + if testing.requires.legacy_engine.enabled: + with expect_raises_message(Exception, "boom"): + engine.begin() + else: + # context manager isn't entered, doesn't actually call + # connect() or connection.begin() + engine.begin() - eq_(mock_connection.return_value.close.mock_calls, [call()]) + if testing.requires.legacy_engine.enabled: + eq_(mock_connection.return_value.close.mock_calls, [call()]) + else: + eq_(mock_connection.return_value.close.mock_calls, []) + + def test_transaction_engine_ctx_begin_fails_include_enter(self): + """test #7272""" + engine = engines.testing_engine() + + close_mock = Mock() + with mock.patch.object( + engine._connection_cls, + "begin", + Mock(side_effect=Exception("boom")), + ), mock.patch.object(engine._connection_cls, "close", close_mock): + with expect_raises_message(Exception, "boom"): + with engine.begin(): + pass + + eq_(close_mock.mock_calls, [call()]) def test_transaction_engine_ctx_rollback(self): fn = self._trans_rollback_fn() @@ -867,6 +893,7 @@ def test_connection_as_ctx(self): self._assert_fn(5, value=8) @testing.fails_on("mysql+oursql", "oursql bug ? 
getting wrong rowcount") + @testing.requires.legacy_engine def test_connect_as_ctx_noautocommit(self): fn = self._trans_fn() self._assert_no_data() @@ -878,6 +905,12 @@ def test_connect_as_ctx_noautocommit(self): self._assert_no_data() +class FutureConvenienceExecuteTest( + fixtures.FutureEngineMixin, ConvenienceExecuteTest +): + __backend__ = True + + class CompiledCacheTest(fixtures.TestBase): __backend__ = True From e35b3f499d48ea2963d266bbf689c508cef60d8b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 2 Nov 2021 10:58:01 -0400 Subject: [PATCH 004/632] ensure soft_close occurs for fetchmany with server side cursor Fixed regression where the :meth:`_engine.CursorResult.fetchmany` method would fail to autoclose a server-side cursor (i.e. when ``stream_results`` or ``yield_per`` is in use, either Core or ORM oriented results) when the results were fully exhausted. All :class:`_result.Result` objects will now consistently raise :class:`_exc.ResourceClosedError` if they are used after a hard close, which includes the "hard close" that occurs after calling "single row or value" methods like :meth:`_result.Result.first` and :meth:`_result.Result.scalar`. This was already the behavior of the most common class of result objects returned for Core statement executions, i.e. those based on :class:`_engine.CursorResult`, so this behavior is not new. However, the change has been extended to properly accommodate for the ORM "filtering" result objects returned when using 2.0 style ORM queries, which would previously behave in "soft closed" style of returning empty results, or wouldn't actually "soft close" at all and would continue yielding from the underlying cursor. 
As part of this change, also added :meth:`_result.Result.close` to the base :class:`_result.Result` class and implemented it for the filtered result implementations that are used by the ORM, so that it is possible to call the :meth:`_engine.CursorResult.close` method on the underlying :class:`_engine.CursorResult` when the ``yield_per`` execution option is in use to close a server side cursor before remaining ORM results have been fetched. This was again already available for Core result sets but the change makes it available for 2.0 style ORM results as well. Fixes: #7274 Change-Id: Id4acdfedbcab891582a7f8edd2e2e7d20d868e53 (cherry picked from commit 33824a9c06ca555ad208a9925bc7b40fe489fc72) --- doc/build/changelog/unreleased_14/7274.rst | 37 ++++++++++ lib/sqlalchemy/engine/cursor.py | 10 ++- lib/sqlalchemy/engine/result.py | 56 +++++++++++++-- test/base/test_result.py | 14 +++- test/orm/test_query.py | 80 ++++++++++++++++++++++ test/sql/test_resultset.py | 62 +++++++++++++++++ 6 files changed, 249 insertions(+), 10 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7274.rst diff --git a/doc/build/changelog/unreleased_14/7274.rst b/doc/build/changelog/unreleased_14/7274.rst new file mode 100644 index 00000000000..7364ae65381 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7274.rst @@ -0,0 +1,37 @@ +.. change:: + :tags: bug, core, regression + :tickets: 7274 + :versions: 2.0.0b1 + + Fixed regression where the :meth:`_engine.CursorResult.fetchmany` method + would fail to autoclose a server-side cursor (i.e. when ``stream_results`` + or ``yield_per`` is in use, either Core or ORM oriented results) when the + results were fully exhausted. + +.. 
change:: + :tags: bug, orm + :tickets: 7274 + :versions: 2.0.0b1 + + All :class:`_result.Result` objects will now consistently raise + :class:`_exc.ResourceClosedError` if they are used after a hard close, + which includes the "hard close" that occurs after calling "single row or + value" methods like :meth:`_result.Result.first` and + :meth:`_result.Result.scalar`. This was already the behavior of the most + common class of result objects returned for Core statement executions, i.e. + those based on :class:`_engine.CursorResult`, so this behavior is not new. + However, the change has been extended to properly accommodate for the ORM + "filtering" result objects returned when using 2.0 style ORM queries, + which would previously behave in "soft closed" style of returning empty + results, or wouldn't actually "soft close" at all and would continue + yielding from the underlying cursor. + + As part of this change, also added :meth:`_result.Result.close` to the base + :class:`_result.Result` class and implemented it for the filtered result + implementations that are used by the ORM, so that it is possible to call + the :meth:`_engine.CursorResult.close` method on the underlying + :class:`_engine.CursorResult` when the ``yield_per`` execution option + is in use to close a server side cursor before remaining ORM results have + been fetched. This was again already available for Core result sets but the + change makes it available for 2.0 style ORM results as well.
+ diff --git a/lib/sqlalchemy/engine/cursor.py b/lib/sqlalchemy/engine/cursor.py index 5e6078f8662..79f87fc0e04 100644 --- a/lib/sqlalchemy/engine/cursor.py +++ b/lib/sqlalchemy/engine/cursor.py @@ -1043,6 +1043,8 @@ def create(cls, result): ) def _buffer_rows(self, result, dbapi_cursor): + """this is currently used only by fetchone().""" + size = self._bufsize try: if size < 1: @@ -1095,9 +1097,14 @@ def fetchmany(self, result, dbapi_cursor, size=None): lb = len(buf) if size > lb: try: - buf.extend(dbapi_cursor.fetchmany(size - lb)) + new = dbapi_cursor.fetchmany(size - lb) except BaseException as e: self.handle_exception(result, dbapi_cursor, e) + else: + if not new: + result._soft_close() + else: + buf.extend(new) result = buf[0:size] self._rowbuffer = collections.deque(buf[size:]) @@ -1348,7 +1355,6 @@ def _soft_close(self, hard=False): """ - if (not hard and self._soft_closed) or (hard and self.closed): return diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py index 3c2e682be65..7cf0bd3f966 100644 --- a/lib/sqlalchemy/engine/result.py +++ b/lib/sqlalchemy/engine/result.py @@ -739,6 +739,28 @@ def __init__(self, cursor_metadata): def _soft_close(self, hard=False): raise NotImplementedError() + def close(self): + """close this :class:`_result.Result`. + + The behavior of this method is implementation specific, and is + not implemented by default. The method should generally end + the resources in use by the result object and also cause any + subsequent iteration or row fetching to raise + :class:`.ResourceClosedError`. + + .. versionadded:: 1.4.27 - ``.close()`` was previously not generally + available for all :class:`_result.Result` classes, instead only + being available on the :class:`_engine.CursorResult` returned for + Core statement executions. 
As most other result objects, namely the + ones used by the ORM, are proxying a :class:`_engine.CursorResult` + in any case, this allows the underlying cursor result to be closed + from the outside facade for the case when the ORM query is using + the ``yield_per`` execution option where it does not immediately + exhaust and autoclose the database cursor. + + """ + self._soft_close(hard=True) + @_generative def yield_per(self, num): """Configure the row-fetching strategy to fetch num rows at a time. @@ -1612,6 +1634,8 @@ class IteratorResult(Result): """ + _hard_closed = False + def __init__( self, cursor_metadata, @@ -1624,16 +1648,29 @@ def __init__( self.raw = raw self._source_supports_scalars = _source_supports_scalars - def _soft_close(self, **kw): + def _soft_close(self, hard=False, **kw): + if hard: + self._hard_closed = True + if self.raw is not None: + self.raw._soft_close(hard=hard, **kw) self.iterator = iter([]) + self._reset_memoizations() + + def _raise_hard_closed(self): + raise exc.ResourceClosedError("This result object is closed.") def _raw_row_iterator(self): return self.iterator def _fetchiter_impl(self): + if self._hard_closed: + self._raise_hard_closed() return self.iterator def _fetchone_impl(self, hard_close=False): + if self._hard_closed: + self._raise_hard_closed() + row = next(self.iterator, _NO_ROW) if row is _NO_ROW: self._soft_close(hard=hard_close) @@ -1642,12 +1679,18 @@ def _fetchone_impl(self, hard_close=False): return row def _fetchall_impl(self): + if self._hard_closed: + self._raise_hard_closed() + try: return list(self.iterator) finally: self._soft_close() def _fetchmany_impl(self, size=None): + if self._hard_closed: + self._raise_hard_closed() + return list(itertools.islice(self.iterator, 0, size)) @@ -1696,6 +1739,10 @@ def yield_per(self, num): self._yield_per = num self.iterator = itertools.chain.from_iterable(self.chunks(num)) + def _soft_close(self, **kw): + super(ChunkedIteratorResult, self)._soft_close(**kw) + 
self.chunks = lambda size: [] + def _fetchmany_impl(self, size=None): if self.dynamic_yield_per: self.iterator = itertools.chain.from_iterable(self.chunks(size)) @@ -1733,11 +1780,8 @@ def __init__(self, cursor_metadata, results): *[r._attributes for r in results] ) - def close(self): - self._soft_close(hard=True) - - def _soft_close(self, hard=False): + def _soft_close(self, hard=False, **kw): for r in self._results: - r._soft_close(hard=hard) + r._soft_close(hard=hard, **kw) if hard: self.closed = True diff --git a/test/base/test_result.py b/test/base/test_result.py index d94602203ce..8c9eb398e15 100644 --- a/test/base/test_result.py +++ b/test/base/test_result.py @@ -484,7 +484,12 @@ def test_first(self): row = result.first() eq_(row, (1, 1, 1)) - eq_(result.all(), []) + # note this is a behavior change in 1.4.27 due to + # adding a real result.close() to Result, previously this would + # return an empty list. this is already the + # behavior with CursorResult, but was mis-implemented for + # other non-cursor result sets. + assert_raises(exc.ResourceClosedError, result.all) def test_one_unique(self): # assert that one() counts rows after uniqueness has been applied. @@ -597,7 +602,12 @@ def test_scalar(self): eq_(result.scalar(), 1) - eq_(result.all(), []) + # note this is a behavior change in 1.4.27 due to + # adding a real result.close() to Result, previously this would + # return an empty list. this is already the + # behavior with CursorResult, but was mis-implemented for + # other non-cursor result sets. 
+ assert_raises(exc.ResourceClosedError, result.all) def test_partition(self): result = self._fixture() diff --git a/test/orm/test_query.py b/test/orm/test_query.py index c567cf1d16b..a4e2ab3fa07 100644 --- a/test/orm/test_query.py +++ b/test/orm/test_query.py @@ -73,6 +73,7 @@ from sqlalchemy.testing.assertions import assert_raises from sqlalchemy.testing.assertions import assert_raises_message from sqlalchemy.testing.assertions import eq_ +from sqlalchemy.testing.assertions import expect_raises from sqlalchemy.testing.assertions import expect_warnings from sqlalchemy.testing.assertions import is_not_none from sqlalchemy.testing.assertsql import CompiledSQL @@ -5271,6 +5272,8 @@ class YieldTest(_fixtures.FixtureTest): run_setup_mappers = "each" run_inserts = "each" + __backend__ = True + def _eagerload_mappings(self, addresses_lazy=True, user_lazy=True): User, Address = self.classes("User", "Address") users, addresses = self.tables("users", "addresses") @@ -5313,6 +5316,83 @@ def test_basic(self): except StopIteration: pass + def test_we_can_close_cursor(self): + """test new usecase close() added along with #7274""" + self._eagerload_mappings() + + User = self.classes.User + + sess = fixture_session() + + stmt = select(User).execution_options(yield_per=15) + result = sess.execute(stmt) + + with mock.patch.object(result.raw, "_soft_close") as mock_close: + two_results = result.fetchmany(2) + eq_(len(two_results), 2) + + eq_(mock_close.mock_calls, []) + + result.close() + + eq_(mock_close.mock_calls, [mock.call(hard=True)]) + + with expect_raises(sa.exc.ResourceClosedError): + result.fetchmany(10) + + with expect_raises(sa.exc.ResourceClosedError): + result.fetchone() + + with expect_raises(sa.exc.ResourceClosedError): + result.all() + + result.close() + + @testing.combinations("fetchmany", "fetchone", "fetchall") + def test_cursor_is_closed_on_exhausted(self, fetch_method): + """test #7274""" + self._eagerload_mappings() + + User = self.classes.User + + sess = 
fixture_session() + + stmt = select(User).execution_options(yield_per=15) + result = sess.execute(stmt) + + with mock.patch.object(result.raw, "_soft_close") as mock_close: + # call assertions are implementation specific. + # test needs that _soft_close called at least once and without + # the hard=True flag + if fetch_method == "fetchmany": + while True: + buf = result.fetchmany(2) + if not buf: + break + eq_(mock_close.mock_calls, [mock.call()]) + elif fetch_method == "fetchall": + eq_(len(result.all()), 4) + eq_( + mock_close.mock_calls, [mock.call(), mock.call(hard=False)] + ) + elif fetch_method == "fetchone": + while True: + row = result.fetchone() + if row is None: + break + eq_( + mock_close.mock_calls, [mock.call(), mock.call(hard=False)] + ) + else: + assert False + + # soft closed, we can still get an empty result + eq_(result.all(), []) + + # real closed + result.close() + assert_raises(sa.exc.ResourceClosedError, result.all) + def test_yield_per_and_execution_options_legacy(self): self._eagerload_mappings() diff --git a/test/sql/test_resultset.py b/test/sql/test_resultset.py index bf912bd2553..c02b3cbc1b6 100644 --- a/test/sql/test_resultset.py +++ b/test/sql/test_resultset.py @@ -2682,6 +2682,68 @@ def test_buffered_fetchmany_yield_per(self, connection): # buffer of 98, plus buffer of 99 - 89, 10 rows eq_(len(result.cursor_strategy._rowbuffer), 10) + @testing.combinations(True, False, argnames="close_on_init") + @testing.combinations( + "fetchone", "fetchmany", "fetchall", argnames="fetch_style" + ) + def test_buffered_fetch_auto_soft_close( + self, connection, close_on_init, fetch_style + ): + """test #7274""" + + table = self.tables.test + + connection.execute( + table.insert(), + [{"x": i, "y": "t_%d" % i} for i in range(15, 30)], + ) + + result = connection.execute(table.select().limit(15)) + assert isinstance(result.cursor_strategy, _cursor.CursorFetchStrategy) + + if close_on_init: + # close_on_init - the initial buffering will exhaust the 
cursor, + # should soft close immediately + result = result.yield_per(30) + else: + # not close_on_init - soft close will occur after fetching an + # empty buffer + result = result.yield_per(5) + assert isinstance( + result.cursor_strategy, _cursor.BufferedRowCursorFetchStrategy + ) + + with mock.patch.object(result, "_soft_close") as soft_close: + if fetch_style == "fetchone": + while True: + row = result.fetchone() + + if row: + eq_(soft_close.mock_calls, []) + else: + # fetchone() is also used by first(), scalar() + # and one() which want to embed a hard close in one + # step + eq_(soft_close.mock_calls, [mock.call(hard=False)]) + break + elif fetch_style == "fetchmany": + while True: + rows = result.fetchmany(5) + + if rows: + eq_(soft_close.mock_calls, []) + else: + eq_(soft_close.mock_calls, [mock.call()]) + break + elif fetch_style == "fetchall": + rows = result.fetchall() + + eq_(soft_close.mock_calls, [mock.call()]) + else: + assert False + + result.close() + def test_buffered_fetchmany_yield_per_all(self, connection): table = self.tables.test From 3c1752ed03760c0dc29d8d204d97070a08ef7ea3 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 2 Nov 2021 18:19:35 -0400 Subject: [PATCH 005/632] map Float to asyncpg.FLOAT, test for infinity Fixes: #7283 Change-Id: I5402a72617b7f9bc366d64bc5ce8669374839984 (cherry picked from commit 9fa79bb53638e02aaa45d77397b39a1b652ba5f1) --- doc/build/changelog/unreleased_14/7283.rst | 9 +++++++++ lib/sqlalchemy/dialects/postgresql/asyncpg.py | 9 +++++++++ lib/sqlalchemy/testing/requirements.py | 6 ++++++ lib/sqlalchemy/testing/suite/test_types.py | 10 ++++++++++ test/requirements.py | 8 ++++++++ 5 files changed, 42 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7283.rst diff --git a/doc/build/changelog/unreleased_14/7283.rst b/doc/build/changelog/unreleased_14/7283.rst new file mode 100644 index 00000000000..0cfd2a491e5 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7283.rst @@ -0,0 +1,9 @@ +.. 
change:: + :tags: bug, postgresql + :tickets: 7283 + + Changed the asyncpg dialect to bind the :class:`.Float` type to the "float" + PostgreSQL type instead of "numeric" so that the value ``float(inf)`` can + be accommodated. Added test suite support for persisence of the "inf" + value. + diff --git a/lib/sqlalchemy/dialects/postgresql/asyncpg.py b/lib/sqlalchemy/dialects/postgresql/asyncpg.py index 3d195e691ae..913b9315974 100644 --- a/lib/sqlalchemy/dialects/postgresql/asyncpg.py +++ b/lib/sqlalchemy/dialects/postgresql/asyncpg.py @@ -249,6 +249,9 @@ def process(value): class AsyncpgNumeric(sqltypes.Numeric): + def get_dbapi_type(self, dbapi): + return dbapi.NUMBER + def bind_processor(self, dialect): return None @@ -277,6 +280,11 @@ def result_processor(self, dialect, coltype): ) +class AsyncpgFloat(AsyncpgNumeric): + def get_dbapi_type(self, dbapi): + return dbapi.FLOAT + + class AsyncpgREGCLASS(REGCLASS): def get_dbapi_type(self, dbapi): return dbapi.STRING @@ -883,6 +891,7 @@ class PGDialect_asyncpg(PGDialect): sqltypes.Integer: AsyncpgInteger, sqltypes.BigInteger: AsyncpgBigInteger, sqltypes.Numeric: AsyncpgNumeric, + sqltypes.Float: AsyncpgFloat, sqltypes.JSON: AsyncpgJSON, json.JSONB: AsyncpgJSONB, sqltypes.JSON.JSONPathType: AsyncpgJSONPathType, diff --git a/lib/sqlalchemy/testing/requirements.py b/lib/sqlalchemy/testing/requirements.py index f8b5dd6062a..e6f669e4c33 100644 --- a/lib/sqlalchemy/testing/requirements.py +++ b/lib/sqlalchemy/testing/requirements.py @@ -980,6 +980,12 @@ def precision_numerics_retains_significant_digits(self): return exclusions.closed() + @property + def infinity_floats(self): + """The Float type can persist and load float('inf'), float('-inf').""" + + return exclusions.closed() + @property def precision_generic_float_type(self): """target backend will return native floating point numbers with at diff --git a/lib/sqlalchemy/testing/suite/test_types.py b/lib/sqlalchemy/testing/suite/test_types.py index 22b85f398d9..d62b608095a 
100644 --- a/lib/sqlalchemy/testing/suite/test_types.py +++ b/lib/sqlalchemy/testing/suite/test_types.py @@ -590,6 +590,16 @@ def test_numeric_as_float(self, do_numeric_test): [15.7563], ) + @testing.requires.infinity_floats + def test_infinity_floats(self, do_numeric_test): + """test for #977, #7283""" + + do_numeric_test( + Float(None), + [float("inf")], + [float("inf")], + ) + @testing.requires.fetch_null_from_numeric def test_numeric_null_as_decimal(self, do_numeric_test): do_numeric_test(Numeric(precision=8, scale=4), [None], [None]) diff --git a/test/requirements.py b/test/requirements.py index 687dadfd1aa..006c523a69d 100644 --- a/test/requirements.py +++ b/test/requirements.py @@ -1269,6 +1269,14 @@ def precision_numerics_retains_significant_digits(self): ] ) + @property + def infinity_floats(self): + return fails_on_everything_except( + "sqlite", "postgresql+psycopg2", "postgresql+asyncpg" + ) + skip_if( + "postgresql+pg8000", "seems to work on pg14 only, not earlier?" + ) + @property def precision_generic_float_type(self): """target backend will return native floating point numbers with at From 5dbc02d51e0869b243f1aa9a35a6369eeb78010a Mon Sep 17 00:00:00 2001 From: Cristian Sabaila Date: Tue, 2 Nov 2021 21:39:08 -0400 Subject: [PATCH 006/632] Fixed issue in visit_on_duplicate_key_update within a composed expression Fixed issue in MySQL :meth:`_mysql.Insert.on_duplicate_key_update` which would render the wrong column name when an expression were used in a VALUES expression. Pull request courtesy Cristian Sabaila. 
Fixes: #7281 Closes: #7285 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/7285 Pull-request-sha: 3e6ad6f2fecc6ae36a10a5a34b5d3d393483edbb Change-Id: I83377c20eae6358fead9e7e361127938e538a71c (cherry picked from commit 5740a843ed805d0b066b1e56e8bf3c584c32cf6b) --- doc/build/changelog/unreleased_14/7281.rst | 8 ++++++++ lib/sqlalchemy/dialects/mysql/base.py | 2 +- test/dialect/mysql/test_compiler.py | 4 ++-- test/dialect/mysql/test_on_duplicate.py | 12 +++++++++--- 4 files changed, 20 insertions(+), 6 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7281.rst diff --git a/doc/build/changelog/unreleased_14/7281.rst b/doc/build/changelog/unreleased_14/7281.rst new file mode 100644 index 00000000000..a5ca9a1622c --- /dev/null +++ b/doc/build/changelog/unreleased_14/7281.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, mysql + :tickets: 7281 + :versions: 2.0.0b1 + + Fixed issue in MySQL :meth:`_mysql.Insert.on_duplicate_key_update` which + would render the wrong column name when an expression were used in a VALUES + expression. Pull request courtesy Cristian Sabaila. 
diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index ad38fee979e..a9e3d0de7d9 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -1302,7 +1302,7 @@ def replace(obj): and obj.table is on_duplicate.inserted_alias ): obj = literal_column( - "VALUES(" + self.preparer.quote(column.name) + ")" + "VALUES(" + self.preparer.quote(obj.name) + ")" ) return obj else: diff --git a/test/dialect/mysql/test_compiler.py b/test/dialect/mysql/test_compiler.py index 708039f943e..ba162b49020 100644 --- a/test/dialect/mysql/test_compiler.py +++ b/test/dialect/mysql/test_compiler.py @@ -1110,12 +1110,12 @@ def test_update_sql_expr(self): ) stmt = stmt.on_duplicate_key_update( bar=func.coalesce(stmt.inserted.bar), - baz=stmt.inserted.baz + "some literal", + baz=stmt.inserted.baz + "some literal" + stmt.inserted.bar, ) expected_sql = ( "INSERT INTO foos (id, bar) VALUES (%s, %s), (%s, %s) ON " "DUPLICATE KEY UPDATE bar = coalesce(VALUES(bar)), " - "baz = (concat(VALUES(baz), %s))" + "baz = (concat(concat(VALUES(baz), %s), VALUES(bar)))" ) self.assert_compile( stmt, diff --git a/test/dialect/mysql/test_on_duplicate.py b/test/dialect/mysql/test_on_duplicate.py index 65d5b8364e7..5a4e6ca8d5c 100644 --- a/test/dialect/mysql/test_on_duplicate.py +++ b/test/dialect/mysql/test_on_duplicate.py @@ -100,13 +100,19 @@ def test_on_duplicate_key_update_expression_multirow(self, connection): conn.execute(insert(foos).values(dict(id=1, bar="b", baz="bz"))) stmt = insert(foos).values([dict(id=1, bar="ab"), dict(id=2, bar="b")]) stmt = stmt.on_duplicate_key_update( - bar=func.concat(stmt.inserted.bar, "_foo") + bar=func.concat(stmt.inserted.bar, "_foo"), + baz=func.concat(stmt.inserted.bar, "_", foos.c.baz), ) result = conn.execute(stmt) eq_(result.inserted_primary_key, (None,)) eq_( - conn.execute(foos.select().where(foos.c.id == 1)).fetchall(), - [(1, "ab_foo", "bz", False)], + 
conn.execute(foos.select()).fetchall(), + [ + # first entry triggers ON DUPLICATE + (1, "ab_foo", "ab_bz", False), + # second entry must be an insert + (2, "b", None, False), + ], ) def test_on_duplicate_key_update_preserve_order(self, connection): From e7d2e6754ba3fd677b5c61cd540da91884d90a8b Mon Sep 17 00:00:00 2001 From: Gord Thompson Date: Tue, 2 Nov 2021 16:16:50 -0600 Subject: [PATCH 007/632] Gracefully degrade unsupported types with asyncpg Fixes: #7284 Modify the on_connect() method of PGDialect_asyncpg to gracefully degrade unsupported types instead of throwing a ValueError. Useful for third-party dialects that derive from PGDialect_asyncpg but whose databases do not support all types (e.g., CockroachDB supports JSONB but not JSON). Change-Id: Ibb7cc8c3de632d27b9716a93d83956a590b2a2b0 (cherry picked from commit 96c294da8a50d692b3f0b8e508dbbca5d9c22f1b) --- doc/build/changelog/unreleased_14/7284.rst | 11 +++++ lib/sqlalchemy/dialects/postgresql/asyncpg.py | 43 +++++++++++++------ 2 files changed, 40 insertions(+), 14 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7284.rst diff --git a/doc/build/changelog/unreleased_14/7284.rst b/doc/build/changelog/unreleased_14/7284.rst new file mode 100644 index 00000000000..fb05488f50c --- /dev/null +++ b/doc/build/changelog/unreleased_14/7284.rst @@ -0,0 +1,11 @@ +.. change:: + :tags: asyncpg, usecase + :tickets: 7284 + :versions: 2.0.0b1 + + Modified the asyncpg dialect to gracefully degrade types that are + not supported by PostgreSQL work-alike databases. For example, + CockroachDB supports JSONB but not JSON. Third-party dialects that + are derived from ``PGDialect_asyncpg`` will no longer have to + work around ValueError being raised by trying to register a codec + for an unsupported type. 
diff --git a/lib/sqlalchemy/dialects/postgresql/asyncpg.py b/lib/sqlalchemy/dialects/postgresql/asyncpg.py index 3d195e691ae..4e968a9b62b 100644 --- a/lib/sqlalchemy/dialects/postgresql/asyncpg.py +++ b/lib/sqlalchemy/dialects/postgresql/asyncpg.py @@ -870,6 +870,8 @@ class PGDialect_asyncpg(PGDialect): use_native_uuid = True + _exclude_type_codecs = util.EMPTY_SET + colspecs = util.update_copy( PGDialect.colspecs, { @@ -1022,21 +1024,34 @@ async def _setup_type_codecs(conn): See https://github.com/MagicStack/asyncpg/issues/623 for reference on why it's set up this way. + + Also, see https://github.com/sqlalchemy/sqlalchemy/issues/7284 for + the rationale behind adding self._exclude_type_codecs """ - await conn._connection.set_type_codec( - "json", - encoder=str.encode, - decoder=_json_decoder, - schema="pg_catalog", - format="binary", - ) - await conn._connection.set_type_codec( - "jsonb", - encoder=_jsonb_encoder, - decoder=_jsonb_decoder, - schema="pg_catalog", - format="binary", - ) + + if "json" not in self._exclude_type_codecs: + try: + await conn._connection.set_type_codec( + "json", + encoder=str.encode, + decoder=_json_decoder, + schema="pg_catalog", + format="binary", + ) + except ValueError: + self._exclude_type_codecs |= {"json"} + + if "jsonb" not in self._exclude_type_codecs: + try: + await conn._connection.set_type_codec( + "jsonb", + encoder=_jsonb_encoder, + decoder=_jsonb_decoder, + schema="pg_catalog", + format="binary", + ) + except ValueError: + self._exclude_type_codecs |= {"jsonb"} def connect(conn): conn.await_(_setup_type_codecs(conn)) From 504e774e8cf014ceecd43f091a9bf840e6c1fe7e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 3 Nov 2021 11:01:08 -0400 Subject: [PATCH 008/632] formatting updates Change-Id: I7352bed0115b8fcdb4708e012d83e81d1ae494ed (cherry picked from commit d80f5277e1c6a6043408244288effb08713c8d70) --- doc/build/changelog/unreleased_14/7274.rst | 2 +- doc/build/changelog/unreleased_14/7283.rst | 2 +- 
doc/build/changelog/unreleased_14/7284.rst | 2 +- lib/sqlalchemy/dialects/postgresql/asyncpg.py | 5 +++-- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/doc/build/changelog/unreleased_14/7274.rst b/doc/build/changelog/unreleased_14/7274.rst index 7364ae65381..08e961a7290 100644 --- a/doc/build/changelog/unreleased_14/7274.rst +++ b/doc/build/changelog/unreleased_14/7274.rst @@ -1,5 +1,5 @@ .. change:: - :tags: bug, core, regression + :tags: bug, engine, regression :tickets: 7274 :versions: 2.0.0b1 diff --git a/doc/build/changelog/unreleased_14/7283.rst b/doc/build/changelog/unreleased_14/7283.rst index 0cfd2a491e5..ce16f0561a8 100644 --- a/doc/build/changelog/unreleased_14/7283.rst +++ b/doc/build/changelog/unreleased_14/7283.rst @@ -1,5 +1,5 @@ .. change:: - :tags: bug, postgresql + :tags: bug, postgresql, asyncpg :tickets: 7283 Changed the asyncpg dialect to bind the :class:`.Float` type to the "float" diff --git a/doc/build/changelog/unreleased_14/7284.rst b/doc/build/changelog/unreleased_14/7284.rst index fb05488f50c..fbbbafa80fe 100644 --- a/doc/build/changelog/unreleased_14/7284.rst +++ b/doc/build/changelog/unreleased_14/7284.rst @@ -1,5 +1,5 @@ .. change:: - :tags: asyncpg, usecase + :tags: postgresql, usecase, asyncpg :tickets: 7284 :versions: 2.0.0b1 diff --git a/lib/sqlalchemy/dialects/postgresql/asyncpg.py b/lib/sqlalchemy/dialects/postgresql/asyncpg.py index 548b7ecfbeb..21f9e3e26e7 100644 --- a/lib/sqlalchemy/dialects/postgresql/asyncpg.py +++ b/lib/sqlalchemy/dialects/postgresql/asyncpg.py @@ -1034,8 +1034,9 @@ async def _setup_type_codecs(conn): See https://github.com/MagicStack/asyncpg/issues/623 for reference on why it's set up this way. 
- Also, see https://github.com/sqlalchemy/sqlalchemy/issues/7284 for - the rationale behind adding self._exclude_type_codecs + See #7284 for the rationale behind adding + self._exclude_type_codecs + """ if "json" not in self._exclude_type_codecs: From 810a46039f7d8f5af856ae76f779de511b287fc2 Mon Sep 17 00:00:00 2001 From: mike bayer Date: Wed, 3 Nov 2021 15:13:22 +0000 Subject: [PATCH 009/632] Revert "Gracefully degrade unsupported types with asyncpg" This reverts commit 96c294da8a50d692b3f0b8e508dbbca5d9c22f1b. I have another approach that is more obvious, easier to override explicitly and also I can test it more easily. Change-Id: I11a3be7700dbc6f25d436e450b6fb8e8f6c4fd16 (cherry picked from commit 2cc49191e28bcf05d97787d6cdc561dd6815e847) --- doc/build/changelog/unreleased_14/7284.rst | 11 ----- lib/sqlalchemy/dialects/postgresql/asyncpg.py | 43 ++++++------------- 2 files changed, 14 insertions(+), 40 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/7284.rst diff --git a/doc/build/changelog/unreleased_14/7284.rst b/doc/build/changelog/unreleased_14/7284.rst deleted file mode 100644 index fbbbafa80fe..00000000000 --- a/doc/build/changelog/unreleased_14/7284.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. change:: - :tags: postgresql, usecase, asyncpg - :tickets: 7284 - :versions: 2.0.0b1 - - Modified the asyncpg dialect to gracefully degrade types that are - not supported by PostgreSQL work-alike databases. For example, - CockroachDB supports JSONB but not JSON. Third-party dialects that - are derived from ``PGDialect_asyncpg`` will no longer have to - work around ValueError being raised by trying to register a codec - for an unsupported type. 
diff --git a/lib/sqlalchemy/dialects/postgresql/asyncpg.py b/lib/sqlalchemy/dialects/postgresql/asyncpg.py index 21f9e3e26e7..2225a727860 100644 --- a/lib/sqlalchemy/dialects/postgresql/asyncpg.py +++ b/lib/sqlalchemy/dialects/postgresql/asyncpg.py @@ -878,8 +878,6 @@ class PGDialect_asyncpg(PGDialect): use_native_uuid = True - _exclude_type_codecs = util.EMPTY_SET - colspecs = util.update_copy( PGDialect.colspecs, { @@ -1034,34 +1032,21 @@ async def _setup_type_codecs(conn): See https://github.com/MagicStack/asyncpg/issues/623 for reference on why it's set up this way. - See #7284 for the rationale behind adding - self._exclude_type_codecs - """ - - if "json" not in self._exclude_type_codecs: - try: - await conn._connection.set_type_codec( - "json", - encoder=str.encode, - decoder=_json_decoder, - schema="pg_catalog", - format="binary", - ) - except ValueError: - self._exclude_type_codecs |= {"json"} - - if "jsonb" not in self._exclude_type_codecs: - try: - await conn._connection.set_type_codec( - "jsonb", - encoder=_jsonb_encoder, - decoder=_jsonb_decoder, - schema="pg_catalog", - format="binary", - ) - except ValueError: - self._exclude_type_codecs |= {"jsonb"} + await conn._connection.set_type_codec( + "json", + encoder=str.encode, + decoder=_json_decoder, + schema="pg_catalog", + format="binary", + ) + await conn._connection.set_type_codec( + "jsonb", + encoder=_jsonb_encoder, + decoder=_jsonb_decoder, + schema="pg_catalog", + format="binary", + ) def connect(conn): conn.await_(_setup_type_codecs(conn)) From eb6bddb160fd8ba1cf87b6c4548ec9a29c34c279 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Wed, 3 Nov 2021 22:47:51 +0100 Subject: [PATCH 010/632] add missing info from groupby documentation Change-Id: Icfaf242353c23a579fe79f9d72500a08d90fcb77 Signed-off-by: Federico Caselli --- lib/sqlalchemy/sql/selectable.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index 
aed6482972a..91436029706 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -3819,6 +3819,8 @@ def group_by(self, *clauses): r"""Return a new selectable with the given list of GROUP BY criterion applied. + All existing GROUP BY settings can be suppressed by passing ``None``. + e.g.:: stmt = select(table.c.name, func.max(table.c.stat)).\ From e09934443b45d58d775fa6f847ded49c18246f63 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 3 Nov 2021 21:26:44 -0400 Subject: [PATCH 011/632] use ExpressionElementRole for case targets in case() Fixed regression where the :func:`_sql.text` construct would no longer be accepted as a target case in the "whens" list within a :func:`_sql.case` construct. The regression appears related to an attempt to guard against some forms of literal values that were considered to be ambiguous when passed here; however, there's no reason the target cases shouldn't be interpreted as open-ended SQL expressions just like anywhere else, and a literal string or tuple will be converted to a bound parameter as would be the case elsewhere. Fixes: #7287 Change-Id: I75478adfa115f3292cb1362cc5b2fdf152b0ed6f (cherry picked from commit 77a17797ecc08736ea942e29f79df4f96bd74e0c) --- doc/build/changelog/unreleased_14/7287.rst | 12 ++++ lib/sqlalchemy/sql/elements.py | 34 ++++------ test/sql/test_case_statement.py | 78 +++++++++++++++++----- test/sql/test_text.py | 9 +++ 4 files changed, 96 insertions(+), 37 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7287.rst diff --git a/doc/build/changelog/unreleased_14/7287.rst b/doc/build/changelog/unreleased_14/7287.rst new file mode 100644 index 00000000000..14c72a8aff1 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7287.rst @@ -0,0 +1,12 @@ +.. 
change:: + :tags: bug, sql, regression + :tickets: 7287 + + Fixed regression where the :func:`_sql.text` construct would no longer be + accepted as a target case in the "whens" list within a :func:`_sql.case` + construct. The regression appears related to an attempt to guard against + some forms of literal values that were considered to be ambiguous when + passed here; however, there's no reason the target cases shouldn't be + interpreted as open-ended SQL expressions just like anywhere else, and a + literal string or tuple will be converted to a bound parameter as would be + the case elsewhere. \ No newline at end of file diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index e883454de65..a276e2cae1e 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -2943,28 +2943,18 @@ def __init__(self, *whens, **kw): pass value = kw.pop("value", None) - if value is not None: - whenlist = [ - ( - coercions.expect( - roles.ExpressionElementRole, - c, - apply_propagate_attrs=self, - ).self_group(), - coercions.expect(roles.ExpressionElementRole, r), - ) - for (c, r) in whens - ] - else: - whenlist = [ - ( - coercions.expect( - roles.ColumnArgumentRole, c, apply_propagate_attrs=self - ).self_group(), - coercions.expect(roles.ExpressionElementRole, r), - ) - for (c, r) in whens - ] + + whenlist = [ + ( + coercions.expect( + roles.ExpressionElementRole, + c, + apply_propagate_attrs=self, + ).self_group(), + coercions.expect(roles.ExpressionElementRole, r), + ) + for (c, r) in whens + ] if whenlist: type_ = list(whenlist[-1])[-1].type diff --git a/test/sql/test_case_statement.py b/test/sql/test_case_statement.py index c6d5f0185ba..c676315b2c5 100644 --- a/test/sql/test_case_statement.py +++ b/test/sql/test_case_statement.py @@ -2,7 +2,7 @@ from sqlalchemy import case from sqlalchemy import cast from sqlalchemy import Column -from sqlalchemy import exc +from sqlalchemy import func from sqlalchemy import Integer from 
sqlalchemy import literal_column from sqlalchemy import MetaData @@ -13,7 +13,6 @@ from sqlalchemy import text from sqlalchemy.sql import column from sqlalchemy.sql import table -from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures @@ -126,23 +125,62 @@ def test_case(self, connection): ], ) - def test_literal_interpretation_ambiguous(self): - assert_raises_message( - exc.ArgumentError, - r"Column expression expected, got 'x'", - case, - ("x", "y"), + def test_literal_interpretation_one(self): + """note this is modified as of #7287 to accept strings, tuples + and other literal values as input + where they are interpreted as bound values just like any other + expression. + + Previously, an exception would be raised that the literal was + ambiguous. + + + """ + self.assert_compile( + case(("x", "y")), + "CASE WHEN :param_1 THEN :param_2 END", + checkparams={"param_1": "x", "param_2": "y"}, ) - def test_literal_interpretation_ambiguous_tuple(self): - assert_raises_message( - exc.ArgumentError, - r"Column expression expected, got \('x', 'y'\)", - case, - (("x", "y"), "z"), + def test_literal_interpretation_two(self): + """note this is modified as of #7287 to accept strings, tuples + and other literal values as input + where they are interpreted as bound values just like any other + expression. + + Previously, an exception would be raised that the literal was + ambiguous. + + + """ + self.assert_compile( + case( + (("x", "y"), "z"), + ), + "CASE WHEN :param_1 THEN :param_2 END", + checkparams={"param_1": ("x", "y"), "param_2": "z"}, ) - def test_literal_interpretation(self): + def test_literal_interpretation_two_point_five(self): + """note this is modified as of #7287 to accept strings, tuples + and other literal values as input + where they are interpreted as bound values just like any other + expression. 
+ + Previously, an exception would be raised that the literal was + ambiguous. + + + """ + self.assert_compile( + case( + (12, "z"), + ), + "CASE WHEN :param_1 THEN :param_2 END", + checkparams={"param_1": 12, "param_2": "z"}, + ) + + def test_literal_interpretation_three(self): t = table("test", column("col1")) self.assert_compile( @@ -221,6 +259,16 @@ def test_text_doesnt_explode(self, connection): [("no",), ("no",), ("no",), ("yes",), ("no",), ("no",)], ) + def test_text_doenst_explode_even_in_whenlist(self): + """test #7287""" + self.assert_compile( + case( + (text(":case = 'upper'"), func.upper(literal_column("q"))), + else_=func.lower(literal_column("q")), + ), + "CASE WHEN :case = 'upper' THEN upper(q) ELSE lower(q) END", + ) + def testcase_with_dict(self): query = select( case( diff --git a/test/sql/test_text.py b/test/sql/test_text.py index 15f6f604861..4fff0ed7ef3 100644 --- a/test/sql/test_text.py +++ b/test/sql/test_text.py @@ -6,6 +6,7 @@ from sqlalchemy import Column from sqlalchemy import desc from sqlalchemy import exc +from sqlalchemy import extract from sqlalchemy import Float from sqlalchemy import func from sqlalchemy import Integer @@ -182,6 +183,14 @@ def test_select_composition_eight(self): "(select f from bar where lala=heyhey) foo WHERE foo.f = t.id", ) + def test_expression_element_role(self): + """test #7287""" + + self.assert_compile( + extract("year", text("some_date + :param")), + "EXTRACT(year FROM some_date + :param)", + ) + @testing.combinations( ( None, From 482fe81f65c14f93e03d4d300c4797658d5c76db Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 3 Nov 2021 11:32:51 -0400 Subject: [PATCH 012/632] simplify and publicize the asyncpg JSON(B) codec registrsation Added overridable methods ``PGDialect_asyncpg.setup_asyncpg_json_codec`` and ``PGDialect_asyncpg.setup_asyncpg_jsonb_codec`` codec, which handle the required task of registering JSON/JSONB codecs for these datatypes when using asyncpg. 
The change is that methods are broken out as individual, overridable methods to support third party dialects that need to alter or disable how these particular codecs are set up. Fixes: #7284 Change-Id: I3eac258fea61f3975bd03c428747f788813ce45e (cherry picked from commit 8bd8f6c5aa1e85907b1517a57a91997532f3ebd7) --- doc/build/changelog/unreleased_14/7284.rst | 13 +++ lib/sqlalchemy/dialects/postgresql/asyncpg.py | 84 ++++++++++++------- test/dialect/postgresql/test_async_pg_py3k.py | 20 +++++ 3 files changed, 88 insertions(+), 29 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7284.rst diff --git a/doc/build/changelog/unreleased_14/7284.rst b/doc/build/changelog/unreleased_14/7284.rst new file mode 100644 index 00000000000..b5d23739c8f --- /dev/null +++ b/doc/build/changelog/unreleased_14/7284.rst @@ -0,0 +1,13 @@ +.. change:: + :tags: postgresql, usecase, asyncpg + :tickets: 7284 + :versions: 2.0.0b1 + + Added overridable methods ``PGDialect_asyncpg.setup_asyncpg_json_codec`` + and ``PGDialect_asyncpg.setup_asyncpg_jsonb_codec`` codec, which handle the + required task of registering JSON/JSONB codecs for these datatypes when + using asyncpg. The change is that methods are broken out as individual, + overridable methods to support third party dialects that need to alter or + disable how these particular codecs are set up. + + diff --git a/lib/sqlalchemy/dialects/postgresql/asyncpg.py b/lib/sqlalchemy/dialects/postgresql/asyncpg.py index 2225a727860..fedc0b495b4 100644 --- a/lib/sqlalchemy/dialects/postgresql/asyncpg.py +++ b/lib/sqlalchemy/dialects/postgresql/asyncpg.py @@ -1003,8 +1003,42 @@ def do_set_input_sizes(self, cursor, list_of_tuples, context): } ) - def on_connect(self): - super_connect = super(PGDialect_asyncpg, self).on_connect() + async def setup_asyncpg_json_codec(self, conn): + """set up JSON codec for asyncpg. + + This occurs for all new connections and + can be overridden by third party dialects. + + .. 
versionadded:: 1.4.27 + + """ + + asyncpg_connection = conn._connection + deserializer = self._json_deserializer or _py_json.loads + + def _json_decoder(bin_value): + return deserializer(bin_value.decode()) + + await asyncpg_connection.set_type_codec( + "json", + encoder=str.encode, + decoder=_json_decoder, + schema="pg_catalog", + format="binary", + ) + + async def setup_asyncpg_jsonb_codec(self, conn): + """set up JSONB codec for asyncpg. + + This occurs for all new connections and + can be overridden by third party dialects. + + .. versionadded:: 1.4.27 + + """ + + asyncpg_connection = conn._connection + deserializer = self._json_deserializer or _py_json.loads def _jsonb_encoder(str_value): # \x01 is the prefix for jsonb used by PostgreSQL. @@ -1013,43 +1047,35 @@ def _jsonb_encoder(str_value): deserializer = self._json_deserializer or _py_json.loads - def _json_decoder(bin_value): - return deserializer(bin_value.decode()) - def _jsonb_decoder(bin_value): # the byte is the \x01 prefix for jsonb used by PostgreSQL. # asyncpg returns it when format='binary' return deserializer(bin_value[1:].decode()) - async def _setup_type_codecs(conn): - """set up type decoders at the asyncpg level. + await asyncpg_connection.set_type_codec( + "jsonb", + encoder=_jsonb_encoder, + decoder=_jsonb_decoder, + schema="pg_catalog", + format="binary", + ) - these are set_type_codec() calls to normalize - There was a tentative decoder for the "char" datatype here - to have it return strings however this type is actually a binary - type that other drivers are likely mis-interpreting. + def on_connect(self): + """on_connect for asyncpg - See https://github.com/MagicStack/asyncpg/issues/623 for reference - on why it's set up this way. + A major component of this for asyncpg is to set up type decoders at the + asyncpg level. 
- """ - await conn._connection.set_type_codec( - "json", - encoder=str.encode, - decoder=_json_decoder, - schema="pg_catalog", - format="binary", - ) - await conn._connection.set_type_codec( - "jsonb", - encoder=_jsonb_encoder, - decoder=_jsonb_decoder, - schema="pg_catalog", - format="binary", - ) + See https://github.com/MagicStack/asyncpg/issues/623 for + notes on JSON/JSONB implementation. + + """ + + super_connect = super(PGDialect_asyncpg, self).on_connect() def connect(conn): - conn.await_(_setup_type_codecs(conn)) + conn.await_(self.setup_asyncpg_json_codec(conn)) + conn.await_(self.setup_asyncpg_jsonb_codec(conn)) if super_connect is not None: super_connect(conn) diff --git a/test/dialect/postgresql/test_async_pg_py3k.py b/test/dialect/postgresql/test_async_pg_py3k.py index 62c8f5dde98..12917e97663 100644 --- a/test/dialect/postgresql/test_async_pg_py3k.py +++ b/test/dialect/postgresql/test_async_pg_py3k.py @@ -13,6 +13,7 @@ from sqlalchemy.testing import async_test from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures +from sqlalchemy.testing import mock class AsyncPgTest(fixtures.TestBase): @@ -251,3 +252,22 @@ async def test_failed_rollback_recover( await conn.begin() await conn.rollback() + + @testing.combinations( + "setup_asyncpg_json_codec", + "setup_asyncpg_jsonb_codec", + argnames="methname", + ) + @async_test + async def test_codec_registration( + self, metadata, async_testing_engine, methname + ): + """test new hooks added for #7284""" + + engine = async_testing_engine() + with mock.patch.object(engine.dialect, methname) as codec_meth: + conn = await engine.connect() + adapted_conn = (await conn.get_raw_connection()).connection + await conn.close() + + eq_(codec_meth.mock_calls, [mock.call(adapted_conn)]) From 71feeb37ca8e6726327aa1f24cf7de458dea6f82 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 4 Nov 2021 13:36:43 -0400 Subject: [PATCH 013/632] Update "transaction has already begun" language As future connections 
will now be autobeginning, there will be more cases where begin() can't be called as well as where isolation level can't be set, which will be surprising as this is a behavioral change for 2.0; additionally, when DBAPI autocommit is set, there isn't actually a DBAPI level transaction in effect even though Connection has a Transaction object. Clarify the language in these two error messages to make it clear that begin() and autobegin are tracking a SQLAlchemy-level Transaction() object, whether or not the DBAPI has actually started a transaction, and that this is the reason rollback() or commit() is required before performing the requsted operation. Additionally make sure the error message mentions "autobegin" as a likely reason this error is being encountered along with what Connection needs the user to do in order to resolve. Change-Id: If8763939eeabc46aa9d9209a56d05ad82b892c5c (cherry picked from commit fee0855bfe2982927ab21ce7398fa48b90af7ca4) --- lib/sqlalchemy/engine/base.py | 5 ++++- lib/sqlalchemy/engine/default.py | 6 ++++-- test/engine/test_transaction.py | 17 ++++++++++++----- 3 files changed, 20 insertions(+), 8 deletions(-) diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index 2444b5c7fe1..0c27ea6d914 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -764,7 +764,10 @@ def begin(self): else: if self._is_future: raise exc.InvalidRequestError( - "a transaction is already begun for this connection" + "This connection has already initialized a SQLAlchemy " + "Transaction() object via begin() or autobegin; can't " + "call begin() here unless rollback() or commit() " + "is called first." 
) else: return MarkerTransaction(self) diff --git a/lib/sqlalchemy/engine/default.py b/lib/sqlalchemy/engine/default.py index 75bca190502..1adb8861745 100644 --- a/lib/sqlalchemy/engine/default.py +++ b/lib/sqlalchemy/engine/default.py @@ -631,8 +631,10 @@ def _set_connection_characteristics(self, connection, characteristics): if trans_objs: if connection._is_future: raise exc.InvalidRequestError( - "This connection has already begun a transaction; " - "%s may not be altered until transaction end" + "This connection has already initialized a SQLAlchemy " + "Transaction() object via begin() or autobegin; " + "%s may not be altered unless rollback() or commit() " + "is called first." % (", ".join(name for name, obj in trans_objs)) ) else: diff --git a/test/engine/test_transaction.py b/test/engine/test_transaction.py index b8e7edc6522..9e614202237 100644 --- a/test/engine/test_transaction.py +++ b/test/engine/test_transaction.py @@ -1568,8 +1568,10 @@ def test_no_autocommit_w_begin(self): with testing.db.begin() as conn: assert_raises_message( exc.InvalidRequestError, - "This connection has already begun a transaction; " - "isolation_level may not be altered until transaction end", + r"This connection has already initialized a SQLAlchemy " + r"Transaction\(\) object via begin\(\) or autobegin; " + r"isolation_level may not be altered unless rollback\(\) or " + r"commit\(\) is called first.", conn.execution_options, isolation_level="AUTOCOMMIT", ) @@ -1582,8 +1584,10 @@ def test_no_autocommit_w_autobegin(self): assert_raises_message( exc.InvalidRequestError, - "This connection has already begun a transaction; " - "isolation_level may not be altered until transaction end", + r"This connection has already initialized a SQLAlchemy " + r"Transaction\(\) object via begin\(\) or autobegin; " + r"isolation_level may not be altered unless rollback\(\) or " + r"commit\(\) is called first.", conn.execution_options, isolation_level="AUTOCOMMIT", ) @@ -1822,7 +1826,10 @@ def 
test_no_double_begin(self): assert_raises_message( exc.InvalidRequestError, - "a transaction is already begun for this connection", + r"This connection has already initialized a SQLAlchemy " + r"Transaction\(\) object via begin\(\) or autobegin; can't " + r"call begin\(\) here unless rollback\(\) or commit\(\) is " + r"called first.", conn.begin, ) From cc7878dc4ae49da1444b2115cd49ebadaf17a60c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 4 Nov 2021 17:02:24 -0400 Subject: [PATCH 014/632] Check for Mapping explicitly in 2.0 params Fixed issue in future :class:`_future.Connection` object where the :meth:`_future.Connection.execute` method would not accept a non-dict mapping object, such as SQLAlchemy's own :class:`.RowMapping` or other ``abc.collections.Mapping`` object as a parameter dictionary. Fixes: #7291 Change-Id: I819f079d86d19d1d81c570e0680f987e51e34b84 (cherry picked from commit 248d232459e38561999c4172acaaddd651c1a933) --- doc/build/changelog/unreleased_14/7291.rst | 8 ++ lib/sqlalchemy/engine/util.py | 6 +- test/engine/test_execute.py | 104 +++++++++++++++++++++ 3 files changed, 115 insertions(+), 3 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7291.rst diff --git a/doc/build/changelog/unreleased_14/7291.rst b/doc/build/changelog/unreleased_14/7291.rst new file mode 100644 index 00000000000..add383ee861 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7291.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, engine + :tickets: 7291 + + Fixed issue in future :class:`_future.Connection` object where the + :meth:`_future.Connection.execute` method would not accept a non-dict + mapping object, such as SQLAlchemy's own :class:`.RowMapping` or other + ``abc.collections.Mapping`` object as a parameter dictionary. 
diff --git a/lib/sqlalchemy/engine/util.py b/lib/sqlalchemy/engine/util.py index 4f2e031ab74..8eb0f182085 100644 --- a/lib/sqlalchemy/engine/util.py +++ b/lib/sqlalchemy/engine/util.py @@ -147,9 +147,9 @@ def _distill_params_20(params): elif isinstance( params, (tuple, dict, immutabledict), - # avoid abc.__instancecheck__ - # (collections_abc.Sequence, collections_abc.Mapping), - ): + # only do abc.__instancecheck__ for Mapping after we've checked + # for plain dictionaries and would otherwise raise + ) or isinstance(params, collections_abc.Mapping): return (params,), _no_kw else: raise exc.ArgumentError("mapping or sequence expected for parameters") diff --git a/test/engine/test_execute.py b/test/engine/test_execute.py index 22731f5d082..cbc2fd1e9b9 100644 --- a/test/engine/test_execute.py +++ b/test/engine/test_execute.py @@ -264,6 +264,58 @@ def test_raw_named(self, connection): (4, "sally"), ] + def test_non_dict_mapping(self, connection): + """ensure arbitrary Mapping works for execute()""" + + class NotADict(collections_abc.Mapping): + def __init__(self, _data): + self._data = _data + + def __iter__(self): + return iter(self._data) + + def __len__(self): + return len(self._data) + + def __getitem__(self, key): + return self._data[key] + + def keys(self): + return self._data.keys() + + nd = NotADict({"a": 10, "b": 15}) + eq_(dict(nd), {"a": 10, "b": 15}) + + result = connection.execute( + select( + bindparam("a", type_=Integer), bindparam("b", type_=Integer) + ), + nd, + ) + eq_(result.first(), (10, 15)) + + def test_row_works_as_mapping(self, connection): + """ensure the RowMapping object works as a parameter dictionary for + execute.""" + + result = connection.execute( + select(literal(10).label("a"), literal(15).label("b")) + ) + row = result.first() + eq_(row, (10, 15)) + eq_(row._mapping, {"a": 10, "b": 15}) + + result = connection.execute( + select( + bindparam("a", type_=Integer).label("a"), + bindparam("b", type_=Integer).label("b"), + ), + 
row._mapping, + ) + row = result.first() + eq_(row, (10, 15)) + eq_(row._mapping, {"a": 10, "b": 15}) + def test_dialect_has_table_assertion(self): with expect_raises_message( tsa.exc.ArgumentError, @@ -3515,6 +3567,58 @@ def define_tables(cls, metadata): test_needs_acid=True, ) + def test_non_dict_mapping(self, connection): + """ensure arbitrary Mapping works for execute()""" + + class NotADict(collections_abc.Mapping): + def __init__(self, _data): + self._data = _data + + def __iter__(self): + return iter(self._data) + + def __len__(self): + return len(self._data) + + def __getitem__(self, key): + return self._data[key] + + def keys(self): + return self._data.keys() + + nd = NotADict({"a": 10, "b": 15}) + eq_(dict(nd), {"a": 10, "b": 15}) + + result = connection.execute( + select( + bindparam("a", type_=Integer), bindparam("b", type_=Integer) + ), + nd, + ) + eq_(result.first(), (10, 15)) + + def test_row_works_as_mapping(self, connection): + """ensure the RowMapping object works as a parameter dictionary for + execute.""" + + result = connection.execute( + select(literal(10).label("a"), literal(15).label("b")) + ) + row = result.first() + eq_(row, (10, 15)) + eq_(row._mapping, {"a": 10, "b": 15}) + + result = connection.execute( + select( + bindparam("a", type_=Integer).label("a"), + bindparam("b", type_=Integer).label("b"), + ), + row._mapping, + ) + row = result.first() + eq_(row, (10, 15)) + eq_(row._mapping, {"a": 10, "b": 15}) + @testing.combinations( ({}, {}, {}), ({"a": "b"}, {}, {"a": "b"}), From 7166159bffc30f3dbb3da8495cc5756c29b9a457 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Fri, 5 Nov 2021 09:58:18 +0100 Subject: [PATCH 015/632] fix typo in changelog. 
Change-Id: Ic78a9ce9032ab759fc796d3218b64352dde6155b --- doc/build/changelog/unreleased_14/7283.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/changelog/unreleased_14/7283.rst b/doc/build/changelog/unreleased_14/7283.rst index ce16f0561a8..4fc86b4ca97 100644 --- a/doc/build/changelog/unreleased_14/7283.rst +++ b/doc/build/changelog/unreleased_14/7283.rst @@ -4,6 +4,6 @@ Changed the asyncpg dialect to bind the :class:`.Float` type to the "float" PostgreSQL type instead of "numeric" so that the value ``float(inf)`` can - be accommodated. Added test suite support for persisence of the "inf" + be accommodated. Added test suite support for persistence of the "inf" value. From 9ee47d90d804dc815685d42913ad170e04c38659 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 5 Nov 2021 10:18:42 -0400 Subject: [PATCH 016/632] use tuple expansion if type._is_tuple, test for Sequence if no type Fixed regression where the row objects returned for ORM queries, which are now the normal :class:`_sql.Row` objects, would not be interpreted by the :meth:`_sql.ColumnOperators.in_` operator as tuple values to be broken out into individual bound parameters, and would instead pass them as single values to the driver leading to failures. The change to the "expanding IN" system now accommodates for the expression already being of type :class:`.TupleType` and treats values accordingly if so. In the uncommon case of using "tuple-in" with an untyped statement such as a textual statement with no typing information, a tuple value is detected for values that implement ``collections.abc.Sequence``, but that are not ``str`` or ``bytes``, as always when testing for ``Sequence``. Added :class:`.TupleType` to the top level ``sqlalchemy`` import namespace. 
Fixes: #7292 Change-Id: I8286387e3b3c3752b3bd4ae3560d4f31172acc22 (cherry picked from commit 0c44a1e77cfde0f841a4a64140314c6b833efdab) --- doc/build/changelog/unreleased_14/7292.rst | 20 ++++ lib/sqlalchemy/__init__.py | 1 + lib/sqlalchemy/sql/compiler.py | 21 ++-- lib/sqlalchemy/sql/sqltypes.py | 5 +- lib/sqlalchemy/testing/suite/test_select.py | 104 ++++++++++++++++++++ lib/sqlalchemy/types.py | 2 + lib/sqlalchemy/util/__init__.py | 1 + test/sql/test_lambdas.py | 5 +- test/sql/test_resultset.py | 32 ++++++ 9 files changed, 182 insertions(+), 9 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7292.rst diff --git a/doc/build/changelog/unreleased_14/7292.rst b/doc/build/changelog/unreleased_14/7292.rst new file mode 100644 index 00000000000..e75d11e813f --- /dev/null +++ b/doc/build/changelog/unreleased_14/7292.rst @@ -0,0 +1,20 @@ +.. change:: + :tags: bug, sql, regression + :tickets: 7292 + + Fixed regression where the row objects returned for ORM queries, which are + now the normal :class:`_sql.Row` objects, would not be interpreted by the + :meth:`_sql.ColumnOperators.in_` operator as tuple values to be broken out + into individual bound parameters, and would instead pass them as single + values to the driver leading to failures. The change to the "expanding IN" + system now accommodates for the expression already being of type + :class:`.TupleType` and treats values accordingly if so. In the uncommon + case of using "tuple-in" with an untyped statement such as a textual + statement with no typing information, a tuple value is detected for values + that implement ``collections.abc.Sequence``, but that are not ``str`` or + ``bytes``, as always when testing for ``Sequence``. + +.. change:: + :tags: usecase, sql + + Added :class:`.TupleType` to the top level ``sqlalchemy`` import namespace. 
\ No newline at end of file diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index ad6d96fdd3e..d5cc233243b 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -123,6 +123,7 @@ from .types import TIME from .types import Time from .types import TIMESTAMP +from .types import TupleType from .types import TypeDecorator from .types import Unicode from .types import UnicodeText diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 266452851bc..7db8d6b5d6a 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -2025,8 +2025,14 @@ def _literal_execute_expanding_parameter_literal_binds( [parameter.type], parameter.expand_op ) - elif isinstance(values[0], (tuple, list)): - assert typ_dialect_impl._is_tuple_type + elif typ_dialect_impl._is_tuple_type or ( + typ_dialect_impl._isnull + and isinstance(values[0], util.collections_abc.Sequence) + and not isinstance( + values[0], util.string_types + util.binary_types + ) + ): + replacement_expression = ( "VALUES " if self.dialect.tuple_in_values else "" ) + ", ".join( @@ -2042,7 +2048,6 @@ def _literal_execute_expanding_parameter_literal_binds( for i, tuple_element in enumerate(values) ) else: - assert not typ_dialect_impl._is_tuple_type replacement_expression = ", ".join( self.render_literal_value(value, parameter.type) for value in values @@ -2071,10 +2076,14 @@ def _literal_execute_expanding_parameter(self, name, parameter, values): [parameter.type], parameter.expand_op ) - elif ( - isinstance(values[0], (tuple, list)) - and not typ_dialect_impl._is_array + elif typ_dialect_impl._is_tuple_type or ( + typ_dialect_impl._isnull + and isinstance(values[0], util.collections_abc.Sequence) + and not isinstance( + values[0], util.string_types + util.binary_types + ) ): + assert not typ_dialect_impl._is_array to_update = [ ("%s_%s_%s" % (name, i, j), value) for i, tuple_element in enumerate(values, 1) diff --git 
a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index ae589d648a8..3f3801ab009 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ -2966,7 +2966,10 @@ class TupleType(TypeEngine): def __init__(self, *types): self._fully_typed = NULLTYPE not in types - self.types = types + self.types = [ + item_type() if isinstance(item_type, type) else item_type + for item_type in types + ] def _resolve_values_to_types(self, value): if self._fully_typed: diff --git a/lib/sqlalchemy/testing/suite/test_select.py b/lib/sqlalchemy/testing/suite/test_select.py index a3475f651b4..3e3ad04a782 100644 --- a/lib/sqlalchemy/testing/suite/test_select.py +++ b/lib/sqlalchemy/testing/suite/test_select.py @@ -30,11 +30,13 @@ from ... import text from ... import true from ... import tuple_ +from ... import TupleType from ... import union from ... import util from ... import values from ...exc import DatabaseError from ...exc import ProgrammingError +from ...util import collections_abc class CollateTest(fixtures.TablesTest): @@ -1131,6 +1133,41 @@ def test_empty_in_plus_notempty_notin(self): ) self._assert_result(stmt, []) + def test_typed_str_in(self): + """test related to #7292. + + as a type is given to the bound param, there is no ambiguity + to the type of element. + + """ + + stmt = text( + "select id FROM some_table WHERE z IN :q ORDER BY id" + ).bindparams(bindparam("q", type_=String, expanding=True)) + self._assert_result( + stmt, + [(2,), (3,), (4,)], + params={"q": ["z2", "z3", "z4"]}, + ) + + def test_untyped_str_in(self): + """test related to #7292. + + for untyped expression, we look at the types of elements. + Test for Sequence to detect tuple in. but not strings or bytes! + as always.... 
+ + """ + + stmt = text( + "select id FROM some_table WHERE z IN :q ORDER BY id" + ).bindparams(bindparam("q", expanding=True)) + self._assert_result( + stmt, + [(2,), (3,), (4,)], + params={"q": ["z2", "z3", "z4"]}, + ) + @testing.requires.tuple_in def test_bound_in_two_tuple_bindparam(self): table = self.tables.some_table @@ -1197,6 +1234,73 @@ def test_bound_in_heterogeneous_two_tuple_text_bindparam(self): params={"q": [(2, "z2"), (3, "z3"), (4, "z4")]}, ) + @testing.requires.tuple_in + def test_bound_in_heterogeneous_two_tuple_typed_bindparam_non_tuple(self): + class LikeATuple(collections_abc.Sequence): + def __init__(self, *data): + self._data = data + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, idx): + return self._data[idx] + + def __len__(self): + return len(self._data) + + stmt = text( + "select id FROM some_table WHERE (x, z) IN :q ORDER BY id" + ).bindparams( + bindparam( + "q", type_=TupleType(Integer(), String()), expanding=True + ) + ) + self._assert_result( + stmt, + [(2,), (3,), (4,)], + params={ + "q": [ + LikeATuple(2, "z2"), + LikeATuple(3, "z3"), + LikeATuple(4, "z4"), + ] + }, + ) + + @testing.requires.tuple_in + def test_bound_in_heterogeneous_two_tuple_text_bindparam_non_tuple(self): + # note this becomes ARRAY if we dont use expanding + # explicitly right now + + class LikeATuple(collections_abc.Sequence): + def __init__(self, *data): + self._data = data + + def __iter__(self): + return iter(self._data) + + def __getitem__(self, idx): + return self._data[idx] + + def __len__(self): + return len(self._data) + + stmt = text( + "select id FROM some_table WHERE (x, z) IN :q ORDER BY id" + ).bindparams(bindparam("q", expanding=True)) + self._assert_result( + stmt, + [(2,), (3,), (4,)], + params={ + "q": [ + LikeATuple(2, "z2"), + LikeATuple(3, "z3"), + LikeATuple(4, "z4"), + ] + }, + ) + def test_empty_set_against_integer_bindparam(self): table = self.tables.some_table stmt = ( diff --git a/lib/sqlalchemy/types.py 
b/lib/sqlalchemy/types.py index ecc351fc948..df8abdc6944 100644 --- a/lib/sqlalchemy/types.py +++ b/lib/sqlalchemy/types.py @@ -36,6 +36,7 @@ "INTEGER", "DATE", "TIME", + "TupleType", "String", "Integer", "SmallInteger", @@ -103,6 +104,7 @@ from .sql.sqltypes import TIME from .sql.sqltypes import Time from .sql.sqltypes import TIMESTAMP +from .sql.sqltypes import TupleType from .sql.sqltypes import Unicode from .sql.sqltypes import UnicodeText from .sql.sqltypes import VARBINARY diff --git a/lib/sqlalchemy/util/__init__.py b/lib/sqlalchemy/util/__init__.py index bdd69431e0f..e4e79294f20 100644 --- a/lib/sqlalchemy/util/__init__.py +++ b/lib/sqlalchemy/util/__init__.py @@ -53,6 +53,7 @@ from .compat import b64decode from .compat import b64encode from .compat import binary_type +from .compat import binary_types from .compat import byte_buffer from .compat import callable from .compat import cmp diff --git a/test/sql/test_lambdas.py b/test/sql/test_lambdas.py index 2e794d7bcf9..a53401a4f10 100644 --- a/test/sql/test_lambdas.py +++ b/test/sql/test_lambdas.py @@ -26,6 +26,7 @@ from sqlalchemy.testing import ne_ from sqlalchemy.testing.assertions import expect_raises_message from sqlalchemy.testing.assertsql import CompiledSQL +from sqlalchemy.types import ARRAY from sqlalchemy.types import Boolean from sqlalchemy.types import Integer from sqlalchemy.types import String @@ -1180,9 +1181,9 @@ def go(names): def test_in_parameters_five(self): def go(n1, n2): stmt = lambdas.lambda_stmt( - lambda: select(1).where(column("q").in_(n1)) + lambda: select(1).where(column("q", ARRAY(String)).in_(n1)) ) - stmt += lambda s: s.where(column("y").in_(n2)) + stmt += lambda s: s.where(column("y", ARRAY(String)).in_(n2)) return stmt expr = go(["a", "b", "c"], ["d", "e", "f"]) diff --git a/test/sql/test_resultset.py b/test/sql/test_resultset.py index c02b3cbc1b6..d07f81facee 100644 --- a/test/sql/test_resultset.py +++ b/test/sql/test_resultset.py @@ -21,6 +21,7 @@ from sqlalchemy import 
testing from sqlalchemy import text from sqlalchemy import true +from sqlalchemy import tuple_ from sqlalchemy import type_coerce from sqlalchemy import TypeDecorator from sqlalchemy import util @@ -775,6 +776,37 @@ def test_row_as_args(self, connection): connection.execute(users.insert(), r._mapping) eq_(connection.execute(users.select()).fetchall(), [(1, "john")]) + @testing.requires.tuple_in + def test_row_tuple_interpretation(self, connection): + """test #7292""" + users = self.tables.users + + connection.execute( + users.insert(), + [ + dict(user_id=1, user_name="u1"), + dict(user_id=2, user_name="u2"), + dict(user_id=3, user_name="u3"), + ], + ) + rows = connection.execute( + select(users.c.user_id, users.c.user_name) + ).all() + + # was previously needed + # rows = [(x, y) for x, y in rows] + + new_stmt = ( + select(users) + .where(tuple_(users.c.user_id, users.c.user_name).in_(rows)) + .order_by(users.c.user_id) + ) + + eq_( + connection.execute(new_stmt).all(), + [(1, "u1"), (2, "u2"), (3, "u3")], + ) + def test_result_as_args(self, connection): users = self.tables.users users2 = self.tables.users2 From 1a9487accf0461cf489ae0a124384232cbe73a73 Mon Sep 17 00:00:00 2001 From: jonathan vanasco Date: Fri, 5 Nov 2021 12:38:30 -0400 Subject: [PATCH 017/632] Fixes: #7295 Fixed issue in ``Table``` object where: param:`implicit_returning` was not compatible with: param:`extend_existing`. 
(cherry picked from commit f5836f29f5612d5f653683644566a57c47291b5d) Change-Id: I16f4ab585d82f5691a3fed9eba04b84730a8a59e --- doc/build/changelog/unreleased_14/7295.rst | 9 ++++++++ lib/sqlalchemy/sql/schema.py | 16 +++++++------- test/sql/test_metadata.py | 25 ++++++++++++++++++++++ 3 files changed, 42 insertions(+), 8 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7295.rst diff --git a/doc/build/changelog/unreleased_14/7295.rst b/doc/build/changelog/unreleased_14/7295.rst new file mode 100644 index 00000000000..058c9d16a58 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7295.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, schema + :tickets: 7295 + + Fixed issue in :class:`.Table` where the + :paramref:`.Table.implicit_returning` parameter would not be + accommodated correctly when passed along with + :paramref:`.Table.extend_existing` to augment an existing + :class:`.Table`. diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py index 166ad98cd89..2ee66460e89 100644 --- a/lib/sqlalchemy/sql/schema.py +++ b/lib/sqlalchemy/sql/schema.py @@ -763,25 +763,25 @@ def _init_existing(self, *args, **kwargs): ) include_columns = kwargs.pop("include_columns", None) - - resolve_fks = kwargs.pop("resolve_fks", True) - if include_columns is not None: for c in self.c: if c.name not in include_columns: self._columns.remove(c) + resolve_fks = kwargs.pop("resolve_fks", True) + for key in ("quote", "quote_schema"): if key in kwargs: raise exc.ArgumentError( "Can't redefine 'quote' or 'quote_schema' arguments" ) - if "comment" in kwargs: - self.comment = kwargs.pop("comment", None) - - if "info" in kwargs: - self.info = kwargs.pop("info") + # update `self` with these kwargs, if provided + self.comment = kwargs.pop("comment", self.comment) + self.implicit_returning = kwargs.pop( + "implicit_returning", self.implicit_returning + ) + self.info = kwargs.pop("info", self.info) if autoload: if not autoload_replace: diff --git 
a/test/sql/test_metadata.py b/test/sql/test_metadata.py index 08502b8bbeb..bd921364795 100644 --- a/test/sql/test_metadata.py +++ b/test/sql/test_metadata.py @@ -1923,6 +1923,31 @@ def test_must_exist(self): ): Table("foo", MetaData(), must_exist=True) + @testing.combinations( + ("comment", ("A", "B", "A")), + ("implicit_returning", (True, False, True)), + ("info", ({"A": 1}, {"A": 2}, {"A": 1})), + ) + def test_extend_attributes(self, attrib, attrib_values): + """ + ensure `extend_existing` is compatible with simple attributes + """ + metadata = MetaData() + for counter, _attrib_value in enumerate(attrib_values): + _extend_existing = True if (counter > 0) else False + _kwargs = { + "extend_existing": _extend_existing, + attrib: _attrib_value, + } + table_a = Table( + "a", + metadata, + Column("foo", String, primary_key=True), + **_kwargs + ) + eq_(getattr(table_a, attrib), _attrib_value) + eq_(getattr(metadata.tables["a"], attrib), _attrib_value) + class PKAutoIncrementTest(fixtures.TestBase): def test_multi_integer_no_autoinc(self): From 8fe9984637b79f582728d4767e973567704b1d37 Mon Sep 17 00:00:00 2001 From: jonathan vanasco Date: Mon, 27 Sep 2021 12:51:32 -0400 Subject: [PATCH 018/632] Fixes: #4390 Deprecated an undocumented loader option syntax ``".*"``, which appears to be no different than passing a single asterisk, and will emit a deprecation warning if used. This syntax may have been intended for something but there is currently no need for it. The original ticket was to document the `.{WILDCARD}` (e.g. `.*`) format, however this format does not appear to be used or needed by SQLAlchemy and is likely not used by any projects or developers. This PR invokes `util.warn_deprecated` to notify users this functionality is deprecated, and directs them to the #4390 issue if they actually use it. Assuming there are no complaints over this warning in the coming months, this code can be removed in a future major release. 
Change-Id: I665e3ac26be0a7819246a2ee56fb5a5f32980c91 (cherry picked from commit 2432d2ed0b28480c0e1004a47aa74238865105b5) --- doc/build/changelog/unreleased_14/4390.rst | 9 +++ lib/sqlalchemy/orm/strategy_options.py | 9 +++ test/orm/test_default_strategies.py | 4 +- test/orm/test_deferred.py | 5 +- test/orm/test_deprecations.py | 93 ++++++++++++++++++++++ 5 files changed, 114 insertions(+), 6 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/4390.rst diff --git a/doc/build/changelog/unreleased_14/4390.rst b/doc/build/changelog/unreleased_14/4390.rst new file mode 100644 index 00000000000..abbc664ee8e --- /dev/null +++ b/doc/build/changelog/unreleased_14/4390.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: deprecated, orm + :tickets: 4390 + + Deprecated an undocumented loader option syntax ``".*"``, which appears to + be no different than passing a single asterisk, and will emit a deprecation + warning if used. This syntax may have been intended for something but there + is currently no need for it. + diff --git a/lib/sqlalchemy/orm/strategy_options.py b/lib/sqlalchemy/orm/strategy_options.py index 675c7218bd6..30286c1d809 100644 --- a/lib/sqlalchemy/orm/strategy_options.py +++ b/lib/sqlalchemy/orm/strategy_options.py @@ -919,6 +919,15 @@ def _split_key(key): return (_DEFAULT_TOKEN,) # coerce fooload(".*") into "wildcard on default entity" elif key.startswith("." + _WILDCARD_TOKEN): + util.warn_deprecated( + "The undocumented `.{WILDCARD}` format is deprecated " + "and will be removed in a future version as it is " + "believed to be unused. 
" + "If you have been using this functionality, please " + "comment on Issue #4390 on the SQLAlchemy project " + "tracker.", + version="1.4", + ) key = key[1:] return key.split(".") else: diff --git a/test/orm/test_default_strategies.py b/test/orm/test_default_strategies.py index 9b228bbaa25..9162d63ecda 100644 --- a/test/orm/test_default_strategies.py +++ b/test/orm/test_default_strategies.py @@ -437,7 +437,7 @@ def test_joined_path_wildcards(self): def go(): users[:] = ( sess.query(User) - .options(joinedload(".*")) + .options(joinedload("*")) .options(defaultload(User.addresses).joinedload("*")) .options(defaultload(User.orders).joinedload("*")) .options( @@ -548,7 +548,7 @@ def test_subquery_path_wildcards(self): def go(): users[:] = ( sess.query(User) - .options(subqueryload(".*")) + .options(subqueryload("*")) .options(defaultload(User.addresses).subqueryload("*")) .options(defaultload(User.orders).subqueryload("*")) .options( diff --git a/test/orm/test_deferred.py b/test/orm/test_deferred.py index bfdfb00b7fd..41e9fa4dda3 100644 --- a/test/orm/test_deferred.py +++ b/test/orm/test_deferred.py @@ -1563,14 +1563,11 @@ def test_load_only_subclass_from_relationship_bound(self): def test_defer_on_wildcard_subclass(self): # pretty much the same as load_only except doesn't # exclude the primary key - - # TODO: what is ".*"? this is not documented anywhere, how did this - # get implemented without docs ? 
see #4390 s = fixture_session() q = ( s.query(Manager) .order_by(Person.person_id) - .options(defer(".*"), undefer(Manager.status)) + .options(defer("*"), undefer(Manager.status)) ) self.assert_compile( q, diff --git a/test/orm/test_deprecations.py b/test/orm/test_deprecations.py index 692a29b3069..ca5870fee63 100644 --- a/test/orm/test_deprecations.py +++ b/test/orm/test_deprecations.py @@ -90,8 +90,14 @@ from .inheritance._poly_fixtures import _Polymorphic from .inheritance._poly_fixtures import Company from .inheritance._poly_fixtures import Engineer +from .inheritance._poly_fixtures import Manager +from .inheritance._poly_fixtures import Person from .test_ac_relationships import PartitionByFixture from .test_bind import GetBindTest as _GetBindTest +from .test_default_strategies import ( + DefaultStrategyOptionsTest as _DefaultStrategyOptionsTest, +) +from .test_deferred import InheritanceTest as _deferred_InheritanceTest from .test_dynamic import _DynamicFixture from .test_events import _RemoveListeners from .test_options import PathTest as OptionsPathTest @@ -166,6 +172,13 @@ r"The merge_result\(\) function is considered legacy as of the 1.x series" ) +dep_exc_wildcard = ( + r"The undocumented `.{WILDCARD}` format is deprecated and will be removed " + r"in a future version as it is believed to be unused. If you have been " + r"using this functionality, please comment on Issue #4390 on the " + r"SQLAlchemy project tracker." 
+) + def _aliased_join_warning(arg=None): return testing.expect_warnings( @@ -9578,3 +9591,83 @@ def kt(*x): [(x and x.id or None, y and y.id or None) for x, y in it], [(u1.id, u2.id), (u1.id, None), (u2.id, u3.id)], ) + + +class DefaultStrategyOptionsTest(_DefaultStrategyOptionsTest): + def test_joined_path_wildcards(self): + sess = self._upgrade_fixture() + users = [] + + User, Order, Item = self.classes("User", "Order", "Item") + + # test upgrade all to joined: 1 sql + def go(): + users[:] = ( + sess.query(User) + .options(joinedload(".*")) + .options(defaultload(User.addresses).joinedload("*")) + .options(defaultload(User.orders).joinedload("*")) + .options( + defaultload(User.orders) + .defaultload(Order.items) + .joinedload("*") + ) + .order_by(self.classes.User.id) + .all() + ) + + with assertions.expect_deprecated(dep_exc_wildcard): + self.assert_sql_count(testing.db, go, 1) + self._assert_fully_loaded(users) + + def test_subquery_path_wildcards(self): + sess = self._upgrade_fixture() + users = [] + + User, Order = self.classes("User", "Order") + + # test upgrade all to subquery: 1 sql + 4 relationships = 5 + def go(): + users[:] = ( + sess.query(User) + .options(subqueryload(".*")) + .options(defaultload(User.addresses).subqueryload("*")) + .options(defaultload(User.orders).subqueryload("*")) + .options( + defaultload(User.orders) + .defaultload(Order.items) + .subqueryload("*") + ) + .order_by(User.id) + .all() + ) + + with assertions.expect_deprecated(dep_exc_wildcard): + self.assert_sql_count(testing.db, go, 5) + + # verify everything loaded, with no additional sql needed + self._assert_fully_loaded(users) + + +class Deferred_InheritanceTest(_deferred_InheritanceTest): + def test_defer_on_wildcard_subclass(self): + # pretty much the same as load_only except doesn't + # exclude the primary key + + # what is ".*"? this is not documented anywhere, how did this + # get implemented without docs ? 
see #4390 + s = fixture_session() + with assertions.expect_deprecated(dep_exc_wildcard): + q = ( + s.query(Manager) + .order_by(Person.person_id) + .options(defer(".*"), undefer(Manager.status)) + ) + self.assert_compile( + q, + "SELECT managers.status AS managers_status " + "FROM people JOIN managers ON " + "people.person_id = managers.person_id ORDER BY people.person_id", + ) + # note this doesn't apply to "bound" loaders since they don't seem + # to have this ".*" featue. From 9ecae501de3246ba98a0047ed2422c0ec08f2746 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 9 Nov 2021 11:31:23 -0500 Subject: [PATCH 019/632] upgrade deferred loader to regular loader if refresh_state Fixed issue where deferred polymorphic loading of attributes from a joined-table inheritance subclass would fail to populate the attribute correctly if the :func:`_orm.load_only` option were used to originally exclude that attribute, in the case where the load_only were descending from a relationship loader option. The fix allows that other valid options such as ``defer(..., raiseload=True)`` etc. still function as expected. Fixes: #7304 Change-Id: I58b7ce7c450bcc52d2f0c9bfbcb4d747463ee9b2 (cherry picked from commit 52b3d6649525929ee1ec14487a2f007194ed741d) --- doc/build/changelog/unreleased_14/7304.rst | 10 ++ lib/sqlalchemy/orm/strategies.py | 21 ++- test/orm/inheritance/test_poly_loading.py | 166 ++++++++++++++++++++- 3 files changed, 195 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7304.rst diff --git a/doc/build/changelog/unreleased_14/7304.rst b/doc/build/changelog/unreleased_14/7304.rst new file mode 100644 index 00000000000..44d188a30ee --- /dev/null +++ b/doc/build/changelog/unreleased_14/7304.rst @@ -0,0 +1,10 @@ +.. 
change:: + :tags: bug, orm + :tickets: 7304 + + Fixed issue where deferred polymorphic loading of attributes from a + joined-table inheritance subclass would fail to populate the attribute + correctly if the :func:`_orm.load_only` option were used to originally + exclude that attribute, in the case where the load_only were descending + from a relationship loader option. The fix allows that other valid options + such as ``defer(..., raiseload=True)`` etc. still function as expected. diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index 2a283caad6e..71c4a697611 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -382,7 +382,26 @@ def create_row_processor( # dictionary. Normally, the DeferredColumnLoader.setup_query() # sets up that data in the "memoized_populators" dictionary # and "create_row_processor()" here is never invoked. - if not self.is_class_level: + + if ( + context.refresh_state + and context.query._compile_options._only_load_props + and self.key in context.query._compile_options._only_load_props + ): + self.parent_property._get_strategy( + (("deferred", False), ("instrument", True)) + ).create_row_processor( + context, + query_entity, + path, + loadopt, + mapper, + result, + adapter, + populators, + ) + + elif not self.is_class_level: if self.raiseload: set_deferred_for_local_state = ( self.parent_property._raise_column_loader diff --git a/test/orm/inheritance/test_poly_loading.py b/test/orm/inheritance/test_poly_loading.py index 35822a29e9f..332f11214d4 100644 --- a/test/orm/inheritance/test_poly_loading.py +++ b/test/orm/inheritance/test_poly_loading.py @@ -1,23 +1,29 @@ -from sqlalchemy import Column +from sqlalchemy import exc from sqlalchemy import ForeignKey from sqlalchemy import Integer +from sqlalchemy import select from sqlalchemy import String from sqlalchemy import testing from sqlalchemy.orm import backref +from sqlalchemy.orm import defaultload from sqlalchemy.orm import 
joinedload +from sqlalchemy.orm import lazyload from sqlalchemy.orm import relationship from sqlalchemy.orm import selectin_polymorphic from sqlalchemy.orm import selectinload from sqlalchemy.orm import Session from sqlalchemy.orm import with_polymorphic from sqlalchemy.sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL +from sqlalchemy.testing import assertsql from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures +from sqlalchemy.testing.assertions import expect_raises_message from sqlalchemy.testing.assertsql import AllOf from sqlalchemy.testing.assertsql import CompiledSQL from sqlalchemy.testing.assertsql import EachOf from sqlalchemy.testing.assertsql import Or from sqlalchemy.testing.fixtures import fixture_session +from sqlalchemy.testing.schema import Column from ._poly_fixtures import _Polymorphic from ._poly_fixtures import Company from ._poly_fixtures import Engineer @@ -686,3 +692,161 @@ def no_opt(): result = no_opt() with self.assert_statement_count(testing.db, 1): eq_(result, [Parent(children=[ChildSubclass1(others=[Other()])])]) + + +class IgnoreOptionsOnSubclassAttrLoad(fixtures.DeclarativeMappedTest): + """test #7304 and related cases + + in this case we trigger the subclass attribute load, while at the same + time there will be a deferred loader option present in the state's + options that was established by the previous loader. + + test both that the option takes effect (i.e. raiseload) and that a deferred + loader doesn't interfere with the mapper's load of the attribute. 
+ + """ + + @classmethod + def setup_classes(cls): + Base = cls.DeclarativeBasic + + class Parent(Base): + __tablename__ = "parent" + + id = Column( + Integer, primary_key=True, test_needs_autoincrement=True + ) + + entity_id = Column(ForeignKey("entity.id")) + entity = relationship("Entity") + + class Entity(Base): + __tablename__ = "entity" + + id = Column( + Integer, primary_key=True, test_needs_autoincrement=True + ) + type = Column(String(32)) + + __mapper_args__ = { + "polymorphic_on": type, + "polymorphic_identity": "entity", + } + + class SubEntity(Entity): + __tablename__ = "sub_entity" + + id = Column(ForeignKey(Entity.id), primary_key=True) + + name = Column(String(32)) + + __mapper_args__ = {"polymorphic_identity": "entity_two"} + + @classmethod + def insert_data(cls, connection): + Parent, SubEntity = cls.classes("Parent", "SubEntity") + + with Session(connection) as session: + session.add(Parent(entity=SubEntity(name="some name"))) + session.commit() + + @testing.combinations( + defaultload, + joinedload, + selectinload, + lazyload, + argnames="first_option", + ) + @testing.combinations( + ("load_only", "id", True), + ("defer", "name", True), + ("undefer", "name", True), + ("raise", "name", False), + (None, None, True), + # these don't seem possible at the moment as the "type" column + # doesn't load and it can't recognize the polymorphic identity. 
+ # we assume load_only() is smart enough to include this column + # ("defer", '*', True), + # ("undefer", '*', True), + # ("raise", '*', False), + argnames="second_option,second_argument,expect_load", + ) + def test_subclass_loadattr( + self, first_option, second_option, second_argument, expect_load + ): + Parent, Entity, SubEntity = self.classes( + "Parent", "Entity", "SubEntity" + ) + + stmt = select(Parent) + + will_lazyload = first_option in (defaultload, lazyload) + + opt = first_option(Parent.entity) + + if second_argument == "name": + second_argument = SubEntity.name + elif second_argument == "id": + second_argument = Entity.id + + if second_option is None: + sub_opt = opt + elif second_option == "raise": + sub_opt = opt.defer(second_argument, raiseload=True) + else: + sub_opt = getattr(opt, second_option)(second_argument) + + stmt = stmt.options(sub_opt) + + session = fixture_session() + result = session.execute(stmt).scalars() + + parent_obj = result.first() + + entity_id = parent_obj.__dict__["entity_id"] + + with assertsql.assert_engine(testing.db) as asserter_: + if expect_load: + eq_(parent_obj.entity.name, "some name") + else: + with expect_raises_message( + exc.InvalidRequestError, + "'SubEntity.name' is not available due to raiseload=True", + ): + parent_obj.entity.name + + expected = [] + + if will_lazyload: + expected.append( + CompiledSQL( + "SELECT entity.id AS entity_id, " + "entity.type AS entity_type FROM entity " + "WHERE entity.id = :pk_1", + [{"pk_1": entity_id}], + ) + ) + + if second_option in ("undefer", "load_only", None): + # load will be a mapper optimized load for the name alone + expected.append( + CompiledSQL( + "SELECT sub_entity.name AS sub_entity_name " + "FROM sub_entity " + "WHERE :param_1 = sub_entity.id", + [{"param_1": entity_id}], + ) + ) + elif second_option == "defer": + # load will be a deferred load. 
this is because the explicit + # call to the deferred load put a deferred loader on the attribute + expected.append( + CompiledSQL( + "SELECT sub_entity.name AS sub_entity_name FROM entity " + "JOIN sub_entity ON entity.id = sub_entity.id " + "WHERE entity.id = :pk_1", + [{"pk_1": entity_id}], + ) + ) + + asserter_.assert_(*expected) From d8c12d9f180eeed9d2c5174eeb0c74169a04262f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 9 Nov 2021 15:02:44 -0500 Subject: [PATCH 020/632] set within_columns_clause=False for all sub-elements of select() Fixed issue where using the feature of using a string label for ordering or grouping described at :ref:`tutorial_order_by_label` would fail to function correctly if used on a :class:`.CTE` construct, when the CTE were embedded inside of an enclosing :class:`_sql.Select` statement that itself was set up as a scalar subquery. Fixes: #7269 Change-Id: Ied6048a1c9a622374a418230c8cfedafa8d3f87e (cherry picked from commit 89661c1a218b7117c1835698dbb81836e72015ae) --- doc/build/changelog/unreleased_14/7269.rst | 11 +++++ lib/sqlalchemy/sql/compiler.py | 3 ++ test/sql/test_cte.py | 48 ++++++++++++++++++++++ 3 files changed, 62 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7269.rst diff --git a/doc/build/changelog/unreleased_14/7269.rst b/doc/build/changelog/unreleased_14/7269.rst new file mode 100644 index 00000000000..6bbd126052d --- /dev/null +++ b/doc/build/changelog/unreleased_14/7269.rst @@ -0,0 +1,11 @@ +.. change:: + :tags: bug, sql + :tickets: 7269 + + Fixed issue where using the feature of using a string label for ordering or + grouping described at :ref:`tutorial_order_by_label` would fail to function + correctly if used on a :class:`.CTE` construct, when the CTE were embedded + inside of an enclosing :class:`_sql.Select` statement that itself was set + up as a scalar subquery. 
+ + diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 7db8d6b5d6a..0f2b5e717be 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -3217,6 +3217,9 @@ def visit_select( # passed in. for ORM use this will convert from an ORM-state # SELECT to a regular "Core" SELECT. other composed operations # such as computation of joins will be performed. + + kwargs["within_columns_clause"] = False + compile_state = select_stmt._compile_state_factory( select_stmt, self, **kwargs ) diff --git a/test/sql/test_cte.py b/test/sql/test_cte.py index 22107eeee51..10fe81b5530 100644 --- a/test/sql/test_cte.py +++ b/test/sql/test_cte.py @@ -551,6 +551,54 @@ def test_wrecur_dupe_col_names(self): "SELECT cte.id, cte.manager_id, cte.id_1 FROM cte", ) + @testing.combinations(True, False, argnames="use_object") + @testing.combinations("order_by", "group_by", argnames="order_by") + def test_order_by_group_by_label_w_scalar_subquery( + self, use_object, order_by + ): + """test issue #7269""" + t = table("test", column("a")) + + b = t.c.a.label("b") + + if use_object: + arg = b + else: + arg = "b" + + if order_by == "order_by": + cte = select(b).order_by(arg).cte() + elif order_by == "group_by": + cte = select(b).group_by(arg).cte() + else: + assert False + + stmt = select(select(cte.c.b).label("c")) + + if use_object and order_by == "group_by": + # group_by(b) is de-references the label, due a difference in + # handling between coercions.GroupByImpl and coercions.OrderByImpl. + # "order by" makes use of the ClauseElement._order_by_label_element + # feature but group_by() doesn't. it's not clear if group_by() + # could do the same thing order_by() does. 
+ self.assert_compile( + stmt, + "WITH anon_1 AS " + "(SELECT test.a AS b FROM test GROUP BY test.a) " + "SELECT (SELECT anon_1.b FROM anon_1) AS c", + ) + else: + self.assert_compile( + stmt, + "WITH anon_1 AS (SELECT test.a AS b FROM test %s b) " + "SELECT (SELECT anon_1.b FROM anon_1) AS c" + % ("ORDER BY" if order_by == "order_by" else "GROUP BY") + # prior to the fix, the use_object version came out as: + # "WITH anon_1 AS (SELECT test.a AS b FROM test " + # "ORDER BY test.a) " + # "SELECT (SELECT anon_1.b FROM anon_1) AS c" + ) + def test_wrecur_dupe_col_names_w_grouping(self): """test #6710 From f79df12bd6d99b8f6f09d4bf07722638c4b4c159 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 8 Nov 2021 18:29:16 -0500 Subject: [PATCH 021/632] change the POSTCOMPILE/ SCHEMA symbols to not conflict w mssql quoting Adjusted the compiler's generation of "post compile" symbols including those used for "expanding IN" as well as for the "schema translate map" to not be based directly on plain bracketed strings with underscores, as this conflicts directly with SQL Server's quoting format of also using brackets, which produces false matches when the compiler replaces "post compile" and "schema translate" symbols. The issue created easy to reproduce examples both with the :meth:`.Inspector.get_schema_names` method when used in conjunction with the :paramref:`_engine.Connection.execution_options.schema_translate_map` feature, as well in the unlikely case that a symbol overlapping with the internal name "POSTCOMPILE" would be used with a feature like "expanding in". 
Fixes: #7300 Change-Id: I6255c850b140522a4aba95085216d0bca18ce230 (cherry picked from commit b919a0a85afd5066f9188b20ef06ee1b4af884a9) --- doc/build/changelog/unreleased_14/7300.rst | 17 ++++ doc/build/core/operators.rst | 10 +-- lib/sqlalchemy/dialects/mssql/base.py | 3 + lib/sqlalchemy/sql/compiler.py | 12 +-- .../testing/suite/test_reflection.py | 17 +++- lib/sqlalchemy/testing/suite/test_select.py | 2 +- test/dialect/mssql/test_compiler.py | 35 ++++---- test/dialect/mssql/test_query.py | 15 ++++ test/dialect/oracle/test_compiler.py | 90 ++++++++++--------- test/dialect/postgresql/test_types.py | 4 +- test/dialect/test_sqlite.py | 2 +- test/engine/test_execute.py | 78 ++++++++-------- test/orm/declarative/test_mixin.py | 2 +- test/orm/inheritance/test_deprecations.py | 4 +- test/orm/inheritance/test_poly_loading.py | 36 ++++---- test/orm/inheritance/test_relationship.py | 6 +- test/orm/inheritance/test_single.py | 65 +++++++------- test/orm/test_ac_relationships.py | 2 +- test/orm/test_deferred.py | 2 +- test/orm/test_deprecations.py | 15 ++-- test/orm/test_froms.py | 10 +-- test/orm/test_lockmode.py | 3 +- test/orm/test_of_type.py | 18 ++-- test/orm/test_query.py | 4 +- test/orm/test_relationship_criteria.py | 7 +- test/orm/test_relationships.py | 2 +- test/orm/test_selectin_relations.py | 54 +++++------ test/sql/test_compiler.py | 74 +++++++-------- test/sql/test_deprecations.py | 2 +- test/sql/test_external_traversal.py | 4 +- test/sql/test_lambdas.py | 12 +-- test/sql/test_operators.py | 36 ++++---- test/sql/test_selectable.py | 2 +- test/sql/test_type_expressions.py | 2 +- 34 files changed, 361 insertions(+), 286 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7300.rst diff --git a/doc/build/changelog/unreleased_14/7300.rst b/doc/build/changelog/unreleased_14/7300.rst new file mode 100644 index 00000000000..d9061af0941 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7300.rst @@ -0,0 +1,17 @@ +.. 
change:: + :tags: mssql, bug + :tickets: 7300 + + Adjusted the compiler's generation of "post compile" symbols including + those used for "expanding IN" as well as for the "schema translate map" to + not be based directly on plain bracketed strings with underscores, as this + conflicts directly with SQL Server's quoting format of also using brackets, + which produces false matches when the compiler replaces "post compile" and + "schema translate" symbols. The issue created easy to reproduce examples + both with the :meth:`.Inspector.get_schema_names` method when used in + conjunction with the + :paramref:`_engine.Connection.execution_options.schema_translate_map` + feature, as well in the unlikely case that a symbol overlapping with the + internal name "POSTCOMPILE" would be used with a feature like "expanding + in". + diff --git a/doc/build/core/operators.rst b/doc/build/core/operators.rst index 8d962560d58..d119db1e0cc 100644 --- a/doc/build/core/operators.rst +++ b/doc/build/core/operators.rst @@ -172,9 +172,9 @@ values to the :meth:`_sql.ColumnOperators.in_` method:: >>> print(column('x').in_([1, 2, 3])) - x IN ([POSTCOMPILE_x_1]) + x IN (__[POSTCOMPILE_x_1]) -The special bound form ``POSTCOMPILE`` is rendered into individual parameters +The special bound form ``__[POSTCOMPILE`` is rendered into individual parameters at execution time, illustrated below: .. 
sourcecode:: pycon+sql @@ -212,12 +212,12 @@ NOT IN "NOT IN" is available via the :meth:`_sql.ColumnOperators.not_in` operator:: >>> print(column('x').not_in([1, 2, 3])) - (x NOT IN ([POSTCOMPILE_x_1])) + (x NOT IN (__[POSTCOMPILE_x_1])) This is typically more easily available by negating with the ``~`` operator:: >>> print(~column('x').in_([1, 2, 3])) - (x NOT IN ([POSTCOMPILE_x_1])) + (x NOT IN (__[POSTCOMPILE_x_1])) Tuple IN Expressions ~~~~~~~~~~~~~~~~~~~~ @@ -232,7 +232,7 @@ then receives a list of tuples:: >>> tup = tuple_(column('x', Integer), column('y', Integer)) >>> expr = tup.in_([(1, 2), (3, 4)]) >>> print(expr) - (x, y) IN ([POSTCOMPILE_param_1]) + (x, y) IN (__[POSTCOMPILE_param_1]) To illustrate the parameters rendered: diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index 95ccd2ca892..8d2bc36ee05 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -2615,6 +2615,9 @@ def _schema_elements(schema): # test/dialect/mssql/test_compiler.py -> test_schema_many_tokens_* # + if schema.startswith("__[SCHEMA_"): + return None, schema + push = [] symbol = "" bracket = False diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 7db8d6b5d6a..89bee0a57ab 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -1248,7 +1248,7 @@ def process_expanding(m): return expr statement = re.sub( - r"\[POSTCOMPILE_(\S+?)(~~.+?~~)?\]", + r"__\[POSTCOMPILE_(\S+?)(~~.+?~~)?\]", process_expanding, self.string, ) @@ -2375,9 +2375,9 @@ def visit_bindparam( # for postcompile w/ expanding, move the "wrapped" part # of this into the inside m = re.match( - r"^(.*)\(\[POSTCOMPILE_(\S+?)\]\)(.*)$", wrapped + r"^(.*)\(__\[POSTCOMPILE_(\S+?)\]\)(.*)$", wrapped ) - wrapped = "([POSTCOMPILE_%s~~%s~~REPL~~%s~~])" % ( + wrapped = "(__[POSTCOMPILE_%s~~%s~~REPL~~%s~~])" % ( m.group(2), m.group(1), m.group(3), @@ -2583,7 +2583,7 @@ def bindparam_string( 
self.escaped_bind_names = {} self.escaped_bind_names[escaped_from] = name if post_compile: - return "[POSTCOMPILE_%s]" % name + return "__[POSTCOMPILE_%s]" % name else: return self.bindtemplate % {"name": name} @@ -5038,7 +5038,7 @@ def symbol_getter(obj): "in schema translate name '%s'" % name ) return quoted_name( - "[SCHEMA_%s]" % (name or "_none"), quote=False + "__[SCHEMA_%s]" % (name or "_none"), quote=False ) else: return obj.schema @@ -5064,7 +5064,7 @@ def replace(m): ) return self.quote_schema(effective_schema) - return re.sub(r"(\[SCHEMA_([^\]]+)\])", replace, statement) + return re.sub(r"(__\[SCHEMA_([^\]]+)\])", replace, statement) def _escape_identifier(self, value): """Escape an identifier. diff --git a/lib/sqlalchemy/testing/suite/test_reflection.py b/lib/sqlalchemy/testing/suite/test_reflection.py index 88189c2d95c..6e6201de977 100644 --- a/lib/sqlalchemy/testing/suite/test_reflection.py +++ b/lib/sqlalchemy/testing/suite/test_reflection.py @@ -2,7 +2,6 @@ import re import sqlalchemy as sa -from sqlalchemy import func from .. import config from .. import engines from .. import eq_ @@ -15,6 +14,7 @@ from ..schema import Table from ... import event from ... import ForeignKey +from ... import func from ... import Identity from ... import inspect from ... 
import Integer @@ -25,6 +25,7 @@ from ...schema import DDL from ...schema import Index from ...sql.elements import quoted_name +from ...sql.schema import BLANK_SCHEMA from ...testing import is_false from ...testing import is_true @@ -512,6 +513,20 @@ def test_get_schema_names(self): self.assert_(testing.config.test_schema in insp.get_schema_names()) + @testing.requires.schema_reflection + def test_get_schema_names_w_translate_map(self, connection): + """test #7300""" + + connection = connection.execution_options( + schema_translate_map={ + "foo": "bar", + BLANK_SCHEMA: testing.config.test_schema, + } + ) + insp = inspect(connection) + + self.assert_(testing.config.test_schema in insp.get_schema_names()) + @testing.requires.schema_reflection def test_dialect_initialize(self): engine = engines.testing_engine() diff --git a/lib/sqlalchemy/testing/suite/test_select.py b/lib/sqlalchemy/testing/suite/test_select.py index 3e3ad04a782..cb78fff2e8e 100644 --- a/lib/sqlalchemy/testing/suite/test_select.py +++ b/lib/sqlalchemy/testing/suite/test_select.py @@ -884,7 +884,7 @@ def test_compile(self): self.assert_compile( stmt, "SELECT some_table.id FROM some_table " - "WHERE some_table.x = [POSTCOMPILE_q]", + "WHERE some_table.x = __[POSTCOMPILE_q]", {}, ) diff --git a/test/dialect/mssql/test_compiler.py b/test/dialect/mssql/test_compiler.py index cf8894f4242..1f76e0969e7 100644 --- a/test/dialect/mssql/test_compiler.py +++ b/test/dialect/mssql/test_compiler.py @@ -334,8 +334,8 @@ def test_update_to_select_schema(self): @testing.combinations( ( lambda: select(literal("x"), literal("y")), - "SELECT [POSTCOMPILE_param_1] AS anon_1, " - "[POSTCOMPILE_param_2] AS anon_2", + "SELECT __[POSTCOMPILE_param_1] AS anon_1, " + "__[POSTCOMPILE_param_2] AS anon_2", { "check_literal_execute": {"param_1": "x", "param_2": "y"}, "check_post_param": {}, @@ -344,7 +344,7 @@ def test_update_to_select_schema(self): ( lambda t: select(t).where(t.c.foo.in_(["x", "y", "z"])), "SELECT sometable.foo 
FROM sometable WHERE sometable.foo " - "IN ([POSTCOMPILE_foo_1])", + "IN (__[POSTCOMPILE_foo_1])", { "check_literal_execute": {"foo_1": ["x", "y", "z"]}, "check_post_param": {}, @@ -436,7 +436,8 @@ def test_noorderby_insubquery_limit(self): crit = q.c.myid == table1.c.myid self.assert_compile( select("*").where(crit), - "SELECT * FROM (SELECT TOP [POSTCOMPILE_param_1] mytable.myid AS " + "SELECT * FROM (SELECT TOP __[POSTCOMPILE_param_1] " + "mytable.myid AS " "myid FROM mytable ORDER BY mytable.myid) AS foo, mytable WHERE " "foo.myid = mytable.myid", ) @@ -810,10 +811,10 @@ def test_union(self): self.assert_compile( u, "SELECT t1.col3 AS col3, t1.col4 AS col4 " - "FROM t1 WHERE t1.col2 IN ([POSTCOMPILE_col2_1]) " + "FROM t1 WHERE t1.col2 IN (__[POSTCOMPILE_col2_1]) " "UNION SELECT t2.col3 AS col3, " "t2.col4 AS col4 FROM t2 WHERE t2.col2 IN " - "([POSTCOMPILE_col2_2]) ORDER BY col3, col4", + "(__[POSTCOMPILE_col2_2]) ORDER BY col3, col4", checkparams={ "col2_1": ["t1col2r1", "t1col2r2"], "col2_2": ["t2col2r2", "t2col2r3"], @@ -823,9 +824,9 @@ def test_union(self): u.alias("bar").select(), "SELECT bar.col3, bar.col4 FROM (SELECT " "t1.col3 AS col3, t1.col4 AS col4 FROM t1 " - "WHERE t1.col2 IN ([POSTCOMPILE_col2_1]) UNION " + "WHERE t1.col2 IN (__[POSTCOMPILE_col2_1]) UNION " "SELECT t2.col3 AS col3, t2.col4 AS col4 " - "FROM t2 WHERE t2.col2 IN ([POSTCOMPILE_col2_2])) AS bar", + "FROM t2 WHERE t2.col2 IN (__[POSTCOMPILE_col2_2])) AS bar", checkparams={ "col2_1": ["t1col2r1", "t1col2r2"], "col2_2": ["t2col2r2", "t2col2r3"], @@ -972,7 +973,7 @@ def test_limit_using_top(self): self.assert_compile( s, - "SELECT TOP [POSTCOMPILE_param_1] t.x, t.y FROM t " + "SELECT TOP __[POSTCOMPILE_param_1] t.x, t.y FROM t " "WHERE t.x = :x_1 ORDER BY t.y", checkparams={"x_1": 5, "param_1": 10}, ) @@ -1000,7 +1001,7 @@ def test_limit_zero_using_top(self): self.assert_compile( s, - "SELECT TOP [POSTCOMPILE_param_1] t.x, t.y FROM t " + "SELECT TOP __[POSTCOMPILE_param_1] t.x, t.y FROM 
t " "WHERE t.x = :x_1 ORDER BY t.y", checkparams={"x_1": 5, "param_1": 0}, ) @@ -1201,7 +1202,7 @@ def test_limit_zero_using_window(self): # of zero, so produces TOP 0 self.assert_compile( s, - "SELECT TOP [POSTCOMPILE_param_1] t.x, t.y FROM t " + "SELECT TOP __[POSTCOMPILE_param_1] t.x, t.y FROM t " "WHERE t.x = :x_1 ORDER BY t.y", checkparams={"x_1": 5, "param_1": 0}, ) @@ -1445,21 +1446,21 @@ def test_column_computed(self, text, persisted): 5, 0, {"percent": True}, - "TOP [POSTCOMPILE_param_1] PERCENT", + "TOP __[POSTCOMPILE_param_1] PERCENT", {"param_1": 5}, ), ( 5, None, {"percent": True, "with_ties": True}, - "TOP [POSTCOMPILE_param_1] PERCENT WITH TIES", + "TOP __[POSTCOMPILE_param_1] PERCENT WITH TIES", {"param_1": 5}, ), ( 5, 0, {"with_ties": True}, - "TOP [POSTCOMPILE_param_1] WITH TIES", + "TOP __[POSTCOMPILE_param_1] WITH TIES", {"param_1": 5}, ), ( @@ -1537,21 +1538,21 @@ def test_fetch(self, dialect_2012, fetch, offset, fetch_kw, exp, params): 5, 0, {"percent": True}, - "TOP [POSTCOMPILE_param_1] PERCENT", + "TOP __[POSTCOMPILE_param_1] PERCENT", {"param_1": 5}, ), ( 5, None, {"percent": True, "with_ties": True}, - "TOP [POSTCOMPILE_param_1] PERCENT WITH TIES", + "TOP __[POSTCOMPILE_param_1] PERCENT WITH TIES", {"param_1": 5}, ), ( 5, 0, {"with_ties": True}, - "TOP [POSTCOMPILE_param_1] WITH TIES", + "TOP __[POSTCOMPILE_param_1] WITH TIES", {"param_1": 5}, ), ( diff --git a/test/dialect/mssql/test_query.py b/test/dialect/mssql/test_query.py index e5e3cd3ad29..4c02fc171c8 100644 --- a/test/dialect/mssql/test_query.py +++ b/test/dialect/mssql/test_query.py @@ -255,6 +255,21 @@ def test_fetchid_trigger(self, metadata, connection): r = connection.execute(t1.insert(), dict(descr="hello")) eq_(r.inserted_primary_key, (100,)) + def test_compiler_symbol_conflict(self, connection, metadata): + t = Table("t", metadata, Column("POSTCOMPILE_DATA", String(50))) + + t.create(connection) + + connection.execute(t.insert().values(POSTCOMPILE_DATA="some data")) + eq_( 
+ connection.scalar( + select(t.c.POSTCOMPILE_DATA).where( + t.c.POSTCOMPILE_DATA.in_(["some data", "some other data"]) + ) + ), + "some data", + ) + @testing.provide_metadata def _test_disable_scope_identity(self): engine = engines.testing_engine(options={"use_scope_identity": False}) diff --git a/test/dialect/oracle/test_compiler.py b/test/dialect/oracle/test_compiler.py index 08158eed470..22ffc888ab0 100644 --- a/test/dialect/oracle/test_compiler.py +++ b/test/dialect/oracle/test_compiler.py @@ -101,7 +101,7 @@ def test_bindparam_quote(self): def test_bindparam_quote_works_on_expanding(self): self.assert_compile( bindparam("uid", expanding=True), - "([POSTCOMPILE_uid])", + "(__[POSTCOMPILE_uid])", dialect=cx_oracle.dialect(), ) @@ -164,9 +164,9 @@ def test_limit_one(self): "anon_2.col2 AS col2, ROWNUM AS ora_rn FROM (SELECT " "sometable.col1 AS col1, sometable.col2 AS " "col2 FROM sometable) anon_2 WHERE ROWNUM <= " - "[POSTCOMPILE_param_1] + [POSTCOMPILE_param_2]) anon_1 " + "__[POSTCOMPILE_param_1] + __[POSTCOMPILE_param_2]) anon_1 " "WHERE ora_rn > " - "[POSTCOMPILE_param_2]", + "__[POSTCOMPILE_param_2]", checkparams={"param_1": 10, "param_2": 20}, ) @@ -203,14 +203,14 @@ def test_limit_one_firstrows(self): self.assert_compile( s, "SELECT anon_1.col1, anon_1.col2 FROM " - "(SELECT /*+ FIRST_ROWS([POSTCOMPILE_param_1]) */ " + "(SELECT /*+ FIRST_ROWS(__[POSTCOMPILE_param_1]) */ " "anon_2.col1 AS col1, " "anon_2.col2 AS col2, ROWNUM AS ora_rn FROM (SELECT " "sometable.col1 AS col1, sometable.col2 AS " "col2 FROM sometable) anon_2 WHERE ROWNUM <= " - "[POSTCOMPILE_param_1] + [POSTCOMPILE_param_2]) anon_1 " + "__[POSTCOMPILE_param_1] + __[POSTCOMPILE_param_2]) anon_1 " "WHERE ora_rn > " - "[POSTCOMPILE_param_2]", + "__[POSTCOMPILE_param_2]", checkparams={"param_1": 10, "param_2": 20}, dialect=oracle.OracleDialect(optimize_limits=True), ) @@ -229,9 +229,10 @@ def test_limit_two(self): "ROWNUM AS ora_rn " "FROM (SELECT sometable.col1 AS col1, " "sometable.col2 AS 
col2 FROM sometable) anon_3 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1] + [POSTCOMPILE_param_2]) " + "WHERE ROWNUM <= __[POSTCOMPILE_param_1] + " + "__[POSTCOMPILE_param_2]) " "anon_2 " - "WHERE ora_rn > [POSTCOMPILE_param_2]) anon_1", + "WHERE ora_rn > __[POSTCOMPILE_param_2]) anon_1", checkparams={"param_1": 10, "param_2": 20}, ) @@ -244,9 +245,10 @@ def test_limit_two(self): "ROWNUM AS ora_rn " "FROM (SELECT sometable.col1 AS col1, " "sometable.col2 AS col2 FROM sometable) anon_3 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1] + [POSTCOMPILE_param_2]) " + "WHERE ROWNUM <= __[POSTCOMPILE_param_1] + " + "__[POSTCOMPILE_param_2]) " "anon_2 " - "WHERE ora_rn > [POSTCOMPILE_param_2]) anon_1", + "WHERE ora_rn > __[POSTCOMPILE_param_2]) anon_1", ) c = s2.compile(dialect=oracle.OracleDialect()) eq_(len(c._result_columns), 2) @@ -264,8 +266,8 @@ def test_limit_three(self): "sometable.col1 AS col1, sometable.col2 AS " "col2 FROM sometable ORDER BY " "sometable.col2) anon_2 WHERE ROWNUM <= " - "[POSTCOMPILE_param_1] + [POSTCOMPILE_param_2]) anon_1 " - "WHERE ora_rn > [POSTCOMPILE_param_2]", + "__[POSTCOMPILE_param_1] + __[POSTCOMPILE_param_2]) anon_1 " + "WHERE ora_rn > __[POSTCOMPILE_param_2]", checkparams={"param_1": 10, "param_2": 20}, ) c = s.compile(dialect=oracle.OracleDialect()) @@ -281,7 +283,7 @@ def test_limit_four(self): "SELECT anon_1.col1, anon_1.col2 FROM (SELECT " "sometable.col1 AS col1, sometable.col2 AS " "col2 FROM sometable ORDER BY " - "sometable.col2) anon_1 WHERE ROWNUM <= [POSTCOMPILE_param_1] " + "sometable.col2) anon_1 WHERE ROWNUM <= __[POSTCOMPILE_param_1] " "FOR UPDATE", checkparams={"param_1": 10}, ) @@ -292,11 +294,11 @@ def test_limit_four_firstrows(self): s = select(t).with_for_update().limit(10).order_by(t.c.col2) self.assert_compile( s, - "SELECT /*+ FIRST_ROWS([POSTCOMPILE_param_1]) */ " + "SELECT /*+ FIRST_ROWS(__[POSTCOMPILE_param_1]) */ " "anon_1.col1, anon_1.col2 FROM (SELECT " "sometable.col1 AS col1, sometable.col2 AS " "col2 FROM 
sometable ORDER BY " - "sometable.col2) anon_1 WHERE ROWNUM <= [POSTCOMPILE_param_1] " + "sometable.col2) anon_1 WHERE ROWNUM <= __[POSTCOMPILE_param_1] " "FOR UPDATE", checkparams={"param_1": 10}, dialect=oracle.OracleDialect(optimize_limits=True), @@ -314,8 +316,8 @@ def test_limit_five(self): "sometable.col1 AS col1, sometable.col2 AS " "col2 FROM sometable ORDER BY " "sometable.col2) anon_2 WHERE ROWNUM <= " - "[POSTCOMPILE_param_1] + [POSTCOMPILE_param_2]) anon_1 " - "WHERE ora_rn > [POSTCOMPILE_param_2] FOR " + "__[POSTCOMPILE_param_1] + __[POSTCOMPILE_param_2]) anon_1 " + "WHERE ora_rn > __[POSTCOMPILE_param_2] FOR " "UPDATE", checkparams={"param_1": 10, "param_2": 20}, ) @@ -335,7 +337,7 @@ def test_limit_six(self): "col1, anon_2.col2 AS col2, ROWNUM AS ora_rn FROM " "(SELECT sometable.col1 AS col1, sometable.col2 AS col2 " "FROM sometable ORDER BY sometable.col2) anon_2 WHERE " - "ROWNUM <= [POSTCOMPILE_param_1] + :param_2 + :param_3) anon_1 " + "ROWNUM <= __[POSTCOMPILE_param_1] + :param_2 + :param_3) anon_1 " "WHERE ora_rn > :param_2 + :param_3", checkparams={"param_1": 10, "param_2": 10, "param_3": 20}, ) @@ -357,7 +359,7 @@ def test_limit_special_quoting(self): 'SELECT anon_1."SUM(ABC)" FROM ' '(SELECT SUM(ABC) AS "SUM(ABC)" ' "FROM my_table ORDER BY SUM(ABC)) anon_1 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1]", + "WHERE ROWNUM <= __[POSTCOMPILE_param_1]", ) col = literal_column("SUM(ABC)").label(quoted_name("SUM(ABC)", True)) @@ -369,7 +371,7 @@ def test_limit_special_quoting(self): 'SELECT anon_1."SUM(ABC)" FROM ' '(SELECT SUM(ABC) AS "SUM(ABC)" ' "FROM my_table ORDER BY SUM(ABC)) anon_1 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1]", + "WHERE ROWNUM <= __[POSTCOMPILE_param_1]", ) col = literal_column("SUM(ABC)").label("SUM(ABC)_") @@ -381,7 +383,7 @@ def test_limit_special_quoting(self): 'SELECT anon_1."SUM(ABC)_" FROM ' '(SELECT SUM(ABC) AS "SUM(ABC)_" ' "FROM my_table ORDER BY SUM(ABC)) anon_1 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1]", + "WHERE 
ROWNUM <= __[POSTCOMPILE_param_1]", ) col = literal_column("SUM(ABC)").label(quoted_name("SUM(ABC)_", True)) @@ -393,7 +395,7 @@ def test_limit_special_quoting(self): 'SELECT anon_1."SUM(ABC)_" FROM ' '(SELECT SUM(ABC) AS "SUM(ABC)_" ' "FROM my_table ORDER BY SUM(ABC)) anon_1 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1]", + "WHERE ROWNUM <= __[POSTCOMPILE_param_1]", ) def test_for_update(self): @@ -511,7 +513,7 @@ def test_for_update_of_w_limit_adaption_col_present(self): "SELECT anon_1.myid, anon_1.name FROM " "(SELECT mytable.myid AS myid, mytable.name AS name " "FROM mytable WHERE mytable.myid = :myid_1) anon_1 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1] " + "WHERE ROWNUM <= __[POSTCOMPILE_param_1] " "FOR UPDATE OF anon_1.name NOWAIT", checkparams={"param_1": 10, "myid_1": 7}, ) @@ -527,7 +529,7 @@ def test_for_update_of_w_limit_adaption_col_unpresent(self): "SELECT anon_1.myid FROM " "(SELECT mytable.myid AS myid, mytable.name AS name " "FROM mytable WHERE mytable.myid = :myid_1) anon_1 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1] " + "WHERE ROWNUM <= __[POSTCOMPILE_param_1] " "FOR UPDATE OF anon_1.name NOWAIT", ) @@ -545,9 +547,10 @@ def test_for_update_of_w_limit_offset_adaption_col_present(self): "ROWNUM AS ora_rn " "FROM (SELECT mytable.myid AS myid, mytable.name AS name " "FROM mytable WHERE mytable.myid = :myid_1) anon_2 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1] + [POSTCOMPILE_param_2]) " + "WHERE ROWNUM <= __[POSTCOMPILE_param_1] + " + "__[POSTCOMPILE_param_2]) " "anon_1 " - "WHERE ora_rn > [POSTCOMPILE_param_2] " + "WHERE ora_rn > __[POSTCOMPILE_param_2] " "FOR UPDATE OF anon_1.name NOWAIT", checkparams={"param_1": 10, "param_2": 50, "myid_1": 7}, ) @@ -566,8 +569,9 @@ def test_for_update_of_w_limit_offset_adaption_col_unpresent(self): "FROM (SELECT mytable.myid AS myid, mytable.name AS name " "FROM mytable WHERE mytable.myid = :myid_1) anon_2 " "WHERE " - "ROWNUM <= [POSTCOMPILE_param_1] + [POSTCOMPILE_param_2]) anon_1 " - "WHERE ora_rn > 
[POSTCOMPILE_param_2] " + "ROWNUM <= __[POSTCOMPILE_param_1] + " + "__[POSTCOMPILE_param_2]) anon_1 " + "WHERE ora_rn > __[POSTCOMPILE_param_2] " "FOR UPDATE OF anon_1.name NOWAIT", checkparams={"param_1": 10, "param_2": 50, "myid_1": 7}, ) @@ -587,9 +591,10 @@ def test_for_update_of_w_limit_offset_adaption_partial_col_unpresent(self): "mytable.bar AS bar, " "mytable.foo AS foo FROM mytable " "WHERE mytable.myid = :myid_1) anon_2 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1] + [POSTCOMPILE_param_2]) " + "WHERE ROWNUM <= __[POSTCOMPILE_param_1] + " + "__[POSTCOMPILE_param_2]) " "anon_1 " - "WHERE ora_rn > [POSTCOMPILE_param_2] " + "WHERE ora_rn > __[POSTCOMPILE_param_2] " "FOR UPDATE OF anon_1.foo, anon_1.bar NOWAIT", checkparams={"param_1": 10, "param_2": 50, "myid_1": 7}, ) @@ -617,7 +622,7 @@ def test_use_binds_for_limits_disabled_one(self): "SELECT anon_1.col1, anon_1.col2 FROM " "(SELECT sometable.col1 AS col1, " "sometable.col2 AS col2 FROM sometable) anon_1 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1]", + "WHERE ROWNUM <= __[POSTCOMPILE_param_1]", dialect=dialect, ) @@ -635,7 +640,7 @@ def test_use_binds_for_limits_disabled_two(self): "anon_2.col1 AS col1, anon_2.col2 AS col2, ROWNUM AS ora_rn " "FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 " "FROM sometable) anon_2) anon_1 " - "WHERE ora_rn > [POSTCOMPILE_param_1]", + "WHERE ora_rn > __[POSTCOMPILE_param_1]", dialect=dialect, ) @@ -653,9 +658,9 @@ def test_use_binds_for_limits_disabled_three(self): "anon_2.col1 AS col1, anon_2.col2 AS col2, ROWNUM AS ora_rn " "FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 " "FROM sometable) anon_2 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1] + " - "[POSTCOMPILE_param_2]) anon_1 " - "WHERE ora_rn > [POSTCOMPILE_param_2]", + "WHERE ROWNUM <= __[POSTCOMPILE_param_1] + " + "__[POSTCOMPILE_param_2]) anon_1 " + "WHERE ora_rn > __[POSTCOMPILE_param_2]", dialect=dialect, ) @@ -672,7 +677,7 @@ def test_use_binds_for_limits_enabled_one(self): "SELECT anon_1.col1, 
anon_1.col2 FROM " "(SELECT sometable.col1 AS col1, " "sometable.col2 AS col2 FROM sometable) anon_1 WHERE ROWNUM " - "<= [POSTCOMPILE_param_1]", + "<= __[POSTCOMPILE_param_1]", dialect=dialect, ) @@ -691,7 +696,7 @@ def test_use_binds_for_limits_enabled_two(self): "ROWNUM AS ora_rn " "FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 " "FROM sometable) anon_2) anon_1 " - "WHERE ora_rn > [POSTCOMPILE_param_1]", + "WHERE ora_rn > __[POSTCOMPILE_param_1]", dialect=dialect, ) @@ -710,9 +715,9 @@ def test_use_binds_for_limits_enabled_three(self): "ROWNUM AS ora_rn " "FROM (SELECT sometable.col1 AS col1, sometable.col2 AS col2 " "FROM sometable) anon_2 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1] + " - "[POSTCOMPILE_param_2]) anon_1 " - "WHERE ora_rn > [POSTCOMPILE_param_2]", + "WHERE ROWNUM <= __[POSTCOMPILE_param_1] + " + "__[POSTCOMPILE_param_2]) anon_1 " + "WHERE ora_rn > __[POSTCOMPILE_param_2]", dialect=dialect, checkparams={"param_1": 10, "param_2": 10}, ) @@ -914,9 +919,10 @@ def test_outer_join_five(self): "thirdtable.userid(+) = " "myothertable.otherid AND mytable.myid = " "myothertable.otherid ORDER BY mytable.name) anon_2 " - "WHERE ROWNUM <= [POSTCOMPILE_param_1] + [POSTCOMPILE_param_2]) " + "WHERE ROWNUM <= __[POSTCOMPILE_param_1] + " + "__[POSTCOMPILE_param_2]) " "anon_1 " - "WHERE ora_rn > [POSTCOMPILE_param_2]", + "WHERE ora_rn > __[POSTCOMPILE_param_2]", checkparams={"param_1": 10, "param_2": 5}, dialect=oracle.dialect(use_ansi=False), ) diff --git a/test/dialect/postgresql/test_types.py b/test/dialect/postgresql/test_types.py index d1c0361e4f9..4f26a6ef661 100644 --- a/test/dialect/postgresql/test_types.py +++ b/test/dialect/postgresql/test_types.py @@ -1208,7 +1208,7 @@ def test_array_in_enum_psycopg2_cast(self): self.assert_compile( expr, - "x IN ([POSTCOMPILE_x_1~~~~REPL~~::myenum[]~~])", + "x IN (__[POSTCOMPILE_x_1~~~~REPL~~::myenum[]~~])", dialect=postgresql.psycopg2.dialect(), ) @@ -1226,7 +1226,7 @@ def 
test_array_in_str_psycopg2_cast(self): self.assert_compile( expr, - "x IN ([POSTCOMPILE_x_1~~~~REPL~~::VARCHAR(15)[]~~])", + "x IN (__[POSTCOMPILE_x_1~~~~REPL~~::VARCHAR(15)[]~~])", dialect=postgresql.psycopg2.dialect(), ) diff --git a/test/dialect/test_sqlite.py b/test/dialect/test_sqlite.py index ed0f11907cf..2e0eccc96bb 100644 --- a/test/dialect/test_sqlite.py +++ b/test/dialect/test_sqlite.py @@ -1159,7 +1159,7 @@ def test_in_tuple(self): .in_([(1, 2), (3, 4)]) .compile(dialect=sqlite.dialect()) ) - eq_(str(compiled), "(q, p) IN ([POSTCOMPILE_param_1])") + eq_(str(compiled), "(q, p) IN (__[POSTCOMPILE_param_1])") eq_( compiled._literal_execute_expanding_parameter( "param_1", diff --git a/test/engine/test_execute.py b/test/engine/test_execute.py index cbc2fd1e9b9..bb90f66d340 100644 --- a/test/engine/test_execute.py +++ b/test/engine/test_execute.py @@ -1207,12 +1207,12 @@ def test_create_table(self, plain_tables, connection): t1.drop(conn) asserter.assert_( - CompiledSQL("CREATE TABLE [SCHEMA__none].t1 (x INTEGER)"), - CompiledSQL("CREATE TABLE [SCHEMA_foo].t2 (x INTEGER)"), - CompiledSQL("CREATE TABLE [SCHEMA_bar].t3 (x INTEGER)"), - CompiledSQL("DROP TABLE [SCHEMA_bar].t3"), - CompiledSQL("DROP TABLE [SCHEMA_foo].t2"), - CompiledSQL("DROP TABLE [SCHEMA__none].t1"), + CompiledSQL("CREATE TABLE __[SCHEMA__none].t1 (x INTEGER)"), + CompiledSQL("CREATE TABLE __[SCHEMA_foo].t2 (x INTEGER)"), + CompiledSQL("CREATE TABLE __[SCHEMA_bar].t3 (x INTEGER)"), + CompiledSQL("DROP TABLE __[SCHEMA_bar].t3"), + CompiledSQL("DROP TABLE __[SCHEMA_foo].t2"), + CompiledSQL("DROP TABLE __[SCHEMA__none].t1"), ) def test_ddl_hastable(self, plain_tables, connection): @@ -1312,27 +1312,29 @@ def test_option_on_execute(self, plain_tables, connection): conn._execute_20(t3.delete(), execution_options=execution_options) asserter.assert_( - CompiledSQL("INSERT INTO [SCHEMA__none].t1 (x) VALUES (:x)"), - CompiledSQL("INSERT INTO [SCHEMA_foo].t2 (x) VALUES (:x)"), - CompiledSQL("INSERT INTO 
[SCHEMA_bar].t3 (x) VALUES (:x)"), + CompiledSQL("INSERT INTO __[SCHEMA__none].t1 (x) VALUES (:x)"), + CompiledSQL("INSERT INTO __[SCHEMA_foo].t2 (x) VALUES (:x)"), + CompiledSQL("INSERT INTO __[SCHEMA_bar].t3 (x) VALUES (:x)"), CompiledSQL( - "UPDATE [SCHEMA__none].t1 SET x=:x WHERE " - "[SCHEMA__none].t1.x = :x_1" + "UPDATE __[SCHEMA__none].t1 SET x=:x WHERE " + "__[SCHEMA__none].t1.x = :x_1" ), CompiledSQL( - "UPDATE [SCHEMA_foo].t2 SET x=:x WHERE " - "[SCHEMA_foo].t2.x = :x_1" + "UPDATE __[SCHEMA_foo].t2 SET x=:x WHERE " + "__[SCHEMA_foo].t2.x = :x_1" ), CompiledSQL( - "UPDATE [SCHEMA_bar].t3 SET x=:x WHERE " - "[SCHEMA_bar].t3.x = :x_1" + "UPDATE __[SCHEMA_bar].t3 SET x=:x WHERE " + "__[SCHEMA_bar].t3.x = :x_1" ), - CompiledSQL("SELECT [SCHEMA__none].t1.x FROM [SCHEMA__none].t1"), - CompiledSQL("SELECT [SCHEMA_foo].t2.x FROM [SCHEMA_foo].t2"), - CompiledSQL("SELECT [SCHEMA_bar].t3.x FROM [SCHEMA_bar].t3"), - CompiledSQL("DELETE FROM [SCHEMA__none].t1"), - CompiledSQL("DELETE FROM [SCHEMA_foo].t2"), - CompiledSQL("DELETE FROM [SCHEMA_bar].t3"), + CompiledSQL( + "SELECT __[SCHEMA__none].t1.x FROM __[SCHEMA__none].t1" + ), + CompiledSQL("SELECT __[SCHEMA_foo].t2.x FROM __[SCHEMA_foo].t2"), + CompiledSQL("SELECT __[SCHEMA_bar].t3.x FROM __[SCHEMA_bar].t3"), + CompiledSQL("DELETE FROM __[SCHEMA__none].t1"), + CompiledSQL("DELETE FROM __[SCHEMA_foo].t2"), + CompiledSQL("DELETE FROM __[SCHEMA_bar].t3"), ) def test_crud(self, plain_tables, connection): @@ -1370,27 +1372,29 @@ def test_crud(self, plain_tables, connection): conn.execute(t3.delete()) asserter.assert_( - CompiledSQL("INSERT INTO [SCHEMA__none].t1 (x) VALUES (:x)"), - CompiledSQL("INSERT INTO [SCHEMA_foo].t2 (x) VALUES (:x)"), - CompiledSQL("INSERT INTO [SCHEMA_bar].t3 (x) VALUES (:x)"), + CompiledSQL("INSERT INTO __[SCHEMA__none].t1 (x) VALUES (:x)"), + CompiledSQL("INSERT INTO __[SCHEMA_foo].t2 (x) VALUES (:x)"), + CompiledSQL("INSERT INTO __[SCHEMA_bar].t3 (x) VALUES (:x)"), + CompiledSQL( + "UPDATE 
__[SCHEMA__none].t1 SET x=:x WHERE " + "__[SCHEMA__none].t1.x = :x_1" + ), CompiledSQL( - "UPDATE [SCHEMA__none].t1 SET x=:x WHERE " - "[SCHEMA__none].t1.x = :x_1" + "UPDATE __[SCHEMA_foo].t2 SET x=:x WHERE " + "__[SCHEMA_foo].t2.x = :x_1" ), CompiledSQL( - "UPDATE [SCHEMA_foo].t2 SET x=:x WHERE " - "[SCHEMA_foo].t2.x = :x_1" + "UPDATE __[SCHEMA_bar].t3 SET x=:x WHERE " + "__[SCHEMA_bar].t3.x = :x_1" ), CompiledSQL( - "UPDATE [SCHEMA_bar].t3 SET x=:x WHERE " - "[SCHEMA_bar].t3.x = :x_1" + "SELECT __[SCHEMA__none].t1.x FROM __[SCHEMA__none].t1" ), - CompiledSQL("SELECT [SCHEMA__none].t1.x FROM [SCHEMA__none].t1"), - CompiledSQL("SELECT [SCHEMA_foo].t2.x FROM [SCHEMA_foo].t2"), - CompiledSQL("SELECT [SCHEMA_bar].t3.x FROM [SCHEMA_bar].t3"), - CompiledSQL("DELETE FROM [SCHEMA__none].t1"), - CompiledSQL("DELETE FROM [SCHEMA_foo].t2"), - CompiledSQL("DELETE FROM [SCHEMA_bar].t3"), + CompiledSQL("SELECT __[SCHEMA_foo].t2.x FROM __[SCHEMA_foo].t2"), + CompiledSQL("SELECT __[SCHEMA_bar].t3.x FROM __[SCHEMA_bar].t3"), + CompiledSQL("DELETE FROM __[SCHEMA__none].t1"), + CompiledSQL("DELETE FROM __[SCHEMA_foo].t2"), + CompiledSQL("DELETE FROM __[SCHEMA_bar].t3"), ) def test_via_engine(self, plain_tables, metadata): @@ -1412,7 +1416,7 @@ def test_via_engine(self, plain_tables, metadata): with eng.connect() as conn: conn.execute(select(t2.c.x)) asserter.assert_( - CompiledSQL("SELECT [SCHEMA_foo].t2.x FROM [SCHEMA_foo].t2") + CompiledSQL("SELECT __[SCHEMA_foo].t2.x FROM __[SCHEMA_foo].t2") ) diff --git a/test/orm/declarative/test_mixin.py b/test/orm/declarative/test_mixin.py index 664c0063038..f3feb5ddf23 100644 --- a/test/orm/declarative/test_mixin.py +++ b/test/orm/declarative/test_mixin.py @@ -1639,7 +1639,7 @@ class Derived(Base): self.assert_compile( s.query(Derived.data_syn).filter(Derived.data_syn == "foo"), "SELECT test.data AS test_data FROM test WHERE test.data = " - ":data_1 AND test.type IN ([POSTCOMPILE_type_1])", + ":data_1 AND test.type IN 
(__[POSTCOMPILE_type_1])", dialect="default", checkparams={"type_1": ["derived"], "data_1": "foo"}, ) diff --git a/test/orm/inheritance/test_deprecations.py b/test/orm/inheritance/test_deprecations.py index 8c807c1152e..6f370d5e47d 100644 --- a/test/orm/inheritance/test_deprecations.py +++ b/test/orm/inheritance/test_deprecations.py @@ -532,7 +532,7 @@ def test_of_type_aliased_fromjoinpoint(self): "companies.name AS companies_name FROM companies " "LEFT OUTER JOIN employees AS employees_1 ON " "companies.company_id = employees_1.company_id " - "AND employees_1.type IN ([POSTCOMPILE_type_1])", + "AND employees_1.type IN (__[POSTCOMPILE_type_1])", ) @@ -739,7 +739,7 @@ def test_query_wpoly_single_inh_subclass(self): "engineer.engineer_info AS engineer_engineer_info, " "engineer.manager_id AS engineer_manager_id " "FROM employee JOIN engineer ON employee.id = engineer.id) " - "AS anon_1 WHERE anon_1.employee_type IN ([POSTCOMPILE_type_1])", + "AS anon_1 WHERE anon_1.employee_type IN (__[POSTCOMPILE_type_1])", ) diff --git a/test/orm/inheritance/test_poly_loading.py b/test/orm/inheritance/test_poly_loading.py index 35822a29e9f..c18b21d99d8 100644 --- a/test/orm/inheritance/test_poly_loading.py +++ b/test/orm/inheritance/test_poly_loading.py @@ -110,7 +110,7 @@ def _assert_all_selectin(self, q): "a.type AS a_type, " "asub.asubdata AS asub_asubdata FROM a JOIN asub " "ON a.id = asub.id " - "WHERE a.id IN ([POSTCOMPILE_primary_keys]) " + "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY a.id", {"primary_keys": [2]}, ), @@ -123,13 +123,13 @@ def _assert_all_selectin(self, q): "SELECT c.a_sub_id AS c_a_sub_id, " "c.id AS c_id " "FROM c WHERE c.a_sub_id " - "IN ([POSTCOMPILE_primary_keys])", + "IN (__[POSTCOMPILE_primary_keys])", {"primary_keys": [2]}, ), ), CompiledSQL( "SELECT b.a_id AS b_a_id, b.id AS b_id FROM b " - "WHERE b.a_id IN ([POSTCOMPILE_primary_keys])", + "WHERE b.a_id IN (__[POSTCOMPILE_primary_keys])", {"primary_keys": [1, 2]}, ), ), @@ -209,7 +209,7 
@@ def test_person_selectin_subclasses(self): "engineers.primary_language AS engineers_primary_language " "FROM people JOIN engineers " "ON people.person_id = engineers.person_id " - "WHERE people.person_id IN ([POSTCOMPILE_primary_keys]) " + "WHERE people.person_id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY people.person_id", {"primary_keys": [1, 2, 5]}, ), @@ -221,7 +221,7 @@ def test_person_selectin_subclasses(self): "managers.manager_name AS managers_manager_name " "FROM people JOIN managers " "ON people.person_id = managers.person_id " - "WHERE people.person_id IN ([POSTCOMPILE_primary_keys]) " + "WHERE people.person_id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY people.person_id", {"primary_keys": [3, 4]}, ), @@ -255,7 +255,7 @@ def test_load_company_plus_employees(self): "people.person_id AS people_person_id, " "people.name AS people_name, people.type AS people_type " "FROM people WHERE people.company_id " - "IN ([POSTCOMPILE_primary_keys]) " + "IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY people.person_id", {"primary_keys": [1, 2]}, ), @@ -269,7 +269,7 @@ def test_load_company_plus_employees(self): "managers.manager_name AS managers_manager_name " "FROM people JOIN managers " "ON people.person_id = managers.person_id " - "WHERE people.person_id IN ([POSTCOMPILE_primary_keys]) " + "WHERE people.person_id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY people.person_id", {"primary_keys": [3, 4]}, ), @@ -283,7 +283,7 @@ def test_load_company_plus_employees(self): "engineers.primary_language AS engineers_primary_language " "FROM people JOIN engineers " "ON people.person_id = engineers.person_id " - "WHERE people.person_id IN ([POSTCOMPILE_primary_keys]) " + "WHERE people.person_id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY people.person_id", {"primary_keys": [1, 2, 5]}, ), @@ -338,7 +338,8 @@ def test_threelevel_selectin_to_inline_mapped(self): "c.c_data AS c_c_data, c.e_data AS c_e_data, " "c.d_data AS c_d_data " "FROM a JOIN c ON a.id = c.id " - "WHERE 
a.id IN ([POSTCOMPILE_primary_keys]) ORDER BY a.id", + "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " + "ORDER BY a.id", [{"primary_keys": [1, 2]}], ), CompiledSQL( @@ -346,7 +347,8 @@ def test_threelevel_selectin_to_inline_mapped(self): "c.c_data AS c_c_data, " "c.d_data AS c_d_data, c.e_data AS c_e_data " "FROM a JOIN c ON a.id = c.id " - "WHERE a.id IN ([POSTCOMPILE_primary_keys]) ORDER BY a.id", + "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " + "ORDER BY a.id", [{"primary_keys": [1, 2]}], ), ), @@ -393,7 +395,8 @@ def test_threelevel_selectin_to_inline_options(self): "c.c_data AS c_c_data, c.e_data AS c_e_data, " "c.d_data AS c_d_data " "FROM a JOIN c ON a.id = c.id " - "WHERE a.id IN ([POSTCOMPILE_primary_keys]) ORDER BY a.id", + "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " + "ORDER BY a.id", [{"primary_keys": [1, 2]}], ), CompiledSQL( @@ -401,7 +404,8 @@ def test_threelevel_selectin_to_inline_options(self): "c.c_data AS c_c_data, c.d_data AS c_d_data, " "c.e_data AS c_e_data " "FROM a JOIN c ON a.id = c.id " - "WHERE a.id IN ([POSTCOMPILE_primary_keys]) ORDER BY a.id", + "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " + "ORDER BY a.id", [{"primary_keys": [1, 2]}], ), ), @@ -469,7 +473,7 @@ def test_threelevel_selectin_to_inline_awkward_alias_options(self): "e.id AS e_id, e.e_data AS e_e_data FROM a JOIN c " "ON a.id = c.id LEFT OUTER JOIN d ON c.id = d.id " "LEFT OUTER JOIN e ON c.id = e.id) AS poly " - "WHERE poly.a_id IN ([POSTCOMPILE_primary_keys]) " + "WHERE poly.a_id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY poly.a_id", [{"primary_keys": [1, 2]}], ), @@ -485,7 +489,7 @@ def test_threelevel_selectin_to_inline_awkward_alias_options(self): "e.e_data AS e_e_data FROM a JOIN c ON a.id = c.id " "LEFT OUTER JOIN d ON c.id = d.id " "LEFT OUTER JOIN e ON c.id = e.id) AS poly " - "WHERE poly.a_id IN ([POSTCOMPILE_primary_keys]) " + "WHERE poly.a_id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY poly.a_id", [{"primary_keys": [1, 2]}], ), @@ -622,7 +626,7 
@@ def no_opt(): "child.type AS child_type " "FROM child JOIN child_subclass1 " "ON child.id = child_subclass1.id " - "WHERE child.id IN ([POSTCOMPILE_primary_keys]) " + "WHERE child.id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY child.id", [{"primary_keys": [1]}], ), @@ -672,7 +676,7 @@ def no_opt(): "ON child.id = child_subclass1.id " "LEFT OUTER JOIN other AS other_1 " "ON child_subclass1.id = other_1.child_subclass_id " - "WHERE child.id IN ([POSTCOMPILE_primary_keys]) " + "WHERE child.id IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY child.id", [{"primary_keys": [1]}], ), diff --git a/test/orm/inheritance/test_relationship.py b/test/orm/inheritance/test_relationship.py index eeb3a7ed636..d12cb1999a0 100644 --- a/test/orm/inheritance/test_relationship.py +++ b/test/orm/inheritance/test_relationship.py @@ -2190,7 +2190,7 @@ def test_contains_eager_multi_alias(self): "a_b.kind AS a_b_kind, a_b.a_id AS a_b_a_id, a.id AS a_id_1, " "a.kind AS a_kind, a.a_id AS a_a_id FROM a " "LEFT OUTER JOIN a AS a_b ON a.id = a_b.a_id AND a_b.kind IN " - "([POSTCOMPILE_kind_1]) LEFT OUTER JOIN x AS b_x " + "(__[POSTCOMPILE_kind_1]) LEFT OUTER JOIN x AS b_x " "ON a_b.id = b_x.a_id", ) @@ -2383,7 +2383,7 @@ def _test_poly_single_poly(self, fn): joinedload(cls.links).joinedload(Link.child).joinedload(cls.links) ) if cls is self.classes.Sub1: - extra = " WHERE parent.type IN ([POSTCOMPILE_type_1])" + extra = " WHERE parent.type IN (__[POSTCOMPILE_type_1])" else: extra = "" @@ -2413,7 +2413,7 @@ def _test_single_poly_poly(self, fn): ) if Link.child.property.mapper.class_ is self.classes.Sub1: - extra = "AND parent_1.type IN ([POSTCOMPILE_type_1]) " + extra = "AND parent_1.type IN (__[POSTCOMPILE_type_1]) " else: extra = "" diff --git a/test/orm/inheritance/test_single.py b/test/orm/inheritance/test_single.py index 30d4549c41b..fbafdd85be7 100644 --- a/test/orm/inheritance/test_single.py +++ b/test/orm/inheritance/test_single.py @@ -182,9 +182,9 @@ def 
test_discrim_bound_param_cloned_ok(self): self.assert_compile( select(subq1, subq2), "SELECT (SELECT employees.employee_id FROM employees " - "WHERE employees.type IN ([POSTCOMPILE_type_1])) AS foo, " + "WHERE employees.type IN (__[POSTCOMPILE_type_1])) AS foo, " "(SELECT employees.employee_id FROM employees " - "WHERE employees.type IN ([POSTCOMPILE_type_1])) AS bar", + "WHERE employees.type IN (__[POSTCOMPILE_type_1])) AS bar", ) def test_multi_qualification(self): @@ -329,7 +329,7 @@ def test_from_self_legacy(self): "employees.engineer_info AS " "employees_engineer_info, employees.type " "AS employees_type FROM employees WHERE " - "employees.type IN ([POSTCOMPILE_type_1])) AS " + "employees.type IN (__[POSTCOMPILE_type_1])) AS " "anon_1", use_default_dialect=True, ) @@ -370,8 +370,8 @@ def test_from_subq(self): "employees.engineer_info AS " "employees_engineer_info, employees.type " "AS employees_type FROM employees WHERE " - "employees.type IN ([POSTCOMPILE_type_1])) AS " - "anon_1 WHERE anon_1.employees_type IN ([POSTCOMPILE_type_2])", + "employees.type IN (__[POSTCOMPILE_type_1])) AS " + "anon_1 WHERE anon_1.employees_type IN (__[POSTCOMPILE_type_2])", use_default_dialect=True, ) @@ -385,13 +385,13 @@ def test_select_from_aliased_w_subclass(self): sess.query(a1.employee_id).select_from(a1), "SELECT employees_1.employee_id AS employees_1_employee_id " "FROM employees AS employees_1 WHERE employees_1.type " - "IN ([POSTCOMPILE_type_1])", + "IN (__[POSTCOMPILE_type_1])", ) self.assert_compile( sess.query(literal("1")).select_from(a1), "SELECT :param_1 AS anon_1 FROM employees AS employees_1 " - "WHERE employees_1.type IN ([POSTCOMPILE_type_1])", + "WHERE employees_1.type IN (__[POSTCOMPILE_type_1])", ) def test_from_statement_select(self): @@ -406,7 +406,7 @@ def test_from_statement_select(self): "SELECT employees.employee_id, employees.name, " "employees.manager_data, employees.engineer_info, " "employees.type FROM employees WHERE employees.type " - "IN 
([POSTCOMPILE_type_1])", + "IN (__[POSTCOMPILE_type_1])", ) def test_from_statement_update(self): @@ -427,7 +427,7 @@ def test_from_statement_update(self): self.assert_compile( q, "UPDATE employees SET engineer_info=:engineer_info " - "WHERE employees.type IN ([POSTCOMPILE_type_1]) " + "WHERE employees.type IN (__[POSTCOMPILE_type_1]) " "RETURNING employees.employee_id", dialect="default_enhanced", ) @@ -452,7 +452,7 @@ def test_union_modifiers(self): "employees.engineer_info AS employees_engineer_info, " "employees.type AS employees_type FROM employees " "WHERE employees.engineer_info = :engineer_info_1 " - "AND employees.type IN ([POSTCOMPILE_type_1]) " + "AND employees.type IN (__[POSTCOMPILE_type_1]) " "%(token)s " "SELECT employees.employee_id AS employees_employee_id, " "employees.name AS employees_name, " @@ -460,7 +460,7 @@ def test_union_modifiers(self): "employees.engineer_info AS employees_engineer_info, " "employees.type AS employees_type FROM employees " "WHERE employees.manager_data = :manager_data_1 " - "AND employees.type IN ([POSTCOMPILE_type_2])) AS anon_1" + "AND employees.type IN (__[POSTCOMPILE_type_2])) AS anon_1" ) for meth, token in [ @@ -496,7 +496,7 @@ def test_having(self): "employees.name AS employees_name, employees.manager_data " "AS employees_manager_data, employees.engineer_info " "AS employees_engineer_info, employees.type AS employees_type " - "FROM employees WHERE employees.type IN ([POSTCOMPILE_type_1]) " + "FROM employees WHERE employees.type IN (__[POSTCOMPILE_type_1]) " "GROUP BY employees.employee_id HAVING employees.name = :name_1", ) @@ -511,7 +511,7 @@ def test_from_self_count(self): "SELECT count(*) AS count_1 " "FROM (SELECT employees.employee_id AS employees_employee_id " "FROM employees " - "WHERE employees.type IN ([POSTCOMPILE_type_1])) AS anon_1", + "WHERE employees.type IN (__[POSTCOMPILE_type_1])) AS anon_1", use_default_dialect=True, ) @@ -649,7 +649,7 @@ def test_exists_standalone(self): ), "SELECT EXISTS 
(SELECT 1 FROM employees WHERE " "employees.name = :name_1 AND employees.type " - "IN ([POSTCOMPILE_type_1])) AS anon_1", + "IN (__[POSTCOMPILE_type_1])) AS anon_1", ) def test_type_filtering(self): @@ -783,7 +783,8 @@ def test_subquery_load(self): CompiledSQL( "SELECT employee.id AS employee_id, employee.name AS " "employee_name, employee.type AS employee_type " - "FROM employee WHERE employee.type IN ([POSTCOMPILE_type_1])", + "FROM employee WHERE employee.type IN " + "(__[POSTCOMPILE_type_1])", params=[{"type_1": ["manager"]}], ), CompiledSQL( @@ -794,7 +795,7 @@ def test_subquery_load(self): "employee_stuff_name, anon_1.employee_id " "AS anon_1_employee_id FROM (SELECT " "employee.id AS employee_id FROM employee " - "WHERE employee.type IN ([POSTCOMPILE_type_1])) AS anon_1 " + "WHERE employee.type IN (__[POSTCOMPILE_type_1])) AS anon_1 " "JOIN employee_stuff ON anon_1.employee_id " "= employee_stuff.employee_id", params=[{"type_1": ["manager"]}], @@ -979,7 +980,7 @@ def test_outer_join_prop(self): "employees.name AS employees_name " "FROM companies LEFT OUTER JOIN employees ON companies.company_id " "= employees.company_id " - "AND employees.type IN ([POSTCOMPILE_type_1])", + "AND employees.type IN (__[POSTCOMPILE_type_1])", ) def test_outer_join_prop_alias(self): @@ -1013,7 +1014,7 @@ def test_outer_join_prop_alias(self): "employees_1_name FROM companies LEFT OUTER " "JOIN employees AS employees_1 ON companies.company_id " "= employees_1.company_id " - "AND employees_1.type IN ([POSTCOMPILE_type_1])", + "AND employees_1.type IN (__[POSTCOMPILE_type_1])", ) def test_outer_join_literal_onclause(self): @@ -1051,7 +1052,7 @@ def test_outer_join_literal_onclause(self): "employees.company_id AS employees_company_id FROM companies " "LEFT OUTER JOIN employees ON " "companies.company_id = employees.company_id " - "AND employees.type IN ([POSTCOMPILE_type_1])", + "AND employees.type IN (__[POSTCOMPILE_type_1])", ) def test_outer_join_literal_onclause_alias(self): @@ 
-1090,7 +1091,7 @@ def test_outer_join_literal_onclause_alias(self): "employees_1.company_id AS employees_1_company_id " "FROM companies LEFT OUTER JOIN employees AS employees_1 ON " "companies.company_id = employees_1.company_id " - "AND employees_1.type IN ([POSTCOMPILE_type_1])", + "AND employees_1.type IN (__[POSTCOMPILE_type_1])", ) def test_outer_join_no_onclause(self): @@ -1126,7 +1127,7 @@ def test_outer_join_no_onclause(self): "employees.company_id AS employees_company_id " "FROM companies LEFT OUTER JOIN employees ON " "companies.company_id = employees.company_id " - "AND employees.type IN ([POSTCOMPILE_type_1])", + "AND employees.type IN (__[POSTCOMPILE_type_1])", ) def test_outer_join_no_onclause_alias(self): @@ -1163,7 +1164,7 @@ def test_outer_join_no_onclause_alias(self): "employees_1.company_id AS employees_1_company_id " "FROM companies LEFT OUTER JOIN employees AS employees_1 ON " "companies.company_id = employees_1.company_id " - "AND employees_1.type IN ([POSTCOMPILE_type_1])", + "AND employees_1.type IN (__[POSTCOMPILE_type_1])", ) def test_correlated_column_select(self): @@ -1200,7 +1201,7 @@ def test_correlated_column_select(self): "(SELECT count(employees.employee_id) AS count_1 " "FROM employees WHERE employees.company_id = " "companies.company_id " - "AND employees.type IN ([POSTCOMPILE_type_1])) AS anon_1 " + "AND employees.type IN (__[POSTCOMPILE_type_1])) AS anon_1 " "FROM companies", ) @@ -1282,8 +1283,8 @@ def test_no_aliasing_from_overlap(self): "ON companies.company_id = employees.company_id " "JOIN employees " "ON companies.company_id = employees.company_id " - "AND employees.type IN ([POSTCOMPILE_type_1]) " - "WHERE employees.type IN ([POSTCOMPILE_type_2])", + "AND employees.type IN (__[POSTCOMPILE_type_1]) " + "WHERE employees.type IN (__[POSTCOMPILE_type_2])", ) def test_relationship_to_subclass(self): @@ -1554,7 +1555,7 @@ def test_assert_join_sql(self): "child.name AS child_name " "FROM parent LEFT OUTER JOIN (m2m AS m2m_1 " 
"JOIN child ON child.id = m2m_1.child_id " - "AND child.discriminator IN ([POSTCOMPILE_discriminator_1])) " + "AND child.discriminator IN (__[POSTCOMPILE_discriminator_1])) " "ON parent.id = m2m_1.parent_id", ) @@ -1571,7 +1572,7 @@ def test_assert_joinedload_sql(self): "FROM parent LEFT OUTER JOIN " "(m2m AS m2m_1 JOIN child AS child_1 " "ON child_1.id = m2m_1.child_id AND child_1.discriminator " - "IN ([POSTCOMPILE_discriminator_1])) " + "IN (__[POSTCOMPILE_discriminator_1])) " "ON parent.id = m2m_1.parent_id", ) @@ -1780,7 +1781,7 @@ def test_wpoly_single_inh_subclass(self): "engineer.manager_id AS engineer_manager_id " "FROM employee JOIN engineer ON employee.id = engineer.id) " "AS anon_1 " - "WHERE anon_1.employee_type IN ([POSTCOMPILE_type_1])", + "WHERE anon_1.employee_type IN (__[POSTCOMPILE_type_1])", ) def test_query_wpoly_single_inh_subclass(self): @@ -1809,7 +1810,7 @@ def test_query_wpoly_single_inh_subclass(self): "engineer.engineer_info AS engineer_engineer_info, " "engineer.manager_id AS engineer_manager_id " "FROM employee JOIN engineer ON employee.id = engineer.id) " - "AS anon_1 WHERE anon_1.employee_type IN ([POSTCOMPILE_type_1])", + "AS anon_1 WHERE anon_1.employee_type IN (__[POSTCOMPILE_type_1])", ) @testing.combinations((True,), (False,), argnames="autoalias") @@ -1836,7 +1837,7 @@ def test_single_inh_subclass_join_joined_inh_subclass(self, autoalias): "JOIN (employee AS employee_1 JOIN engineer AS engineer_1 " "ON employee_1.id = engineer_1.id) " "ON engineer_1.manager_id = manager.id " - "WHERE employee.type IN ([POSTCOMPILE_type_1])", + "WHERE employee.type IN (__[POSTCOMPILE_type_1])", ) def test_single_inh_subclass_join_wpoly_joined_inh_subclass(self): @@ -1873,7 +1874,7 @@ def test_single_inh_subclass_join_wpoly_joined_inh_subclass(self): "FROM employee " "JOIN engineer ON employee.id = engineer.id) AS anon_1 " "ON anon_1.manager_id = manager.id " - "WHERE employee.type IN ([POSTCOMPILE_type_1])", + "WHERE employee.type IN 
(__[POSTCOMPILE_type_1])", ) @testing.combinations((True,), (False,), argnames="autoalias") @@ -1903,7 +1904,7 @@ def test_joined_inh_subclass_join_single_inh_subclass(self, autoalias): "JOIN (employee AS employee_1 JOIN manager AS manager_1 " "ON employee_1.id = manager_1.id) " "ON engineer.manager_id = manager_1.id " - "AND employee_1.type IN ([POSTCOMPILE_type_1])", + "AND employee_1.type IN (__[POSTCOMPILE_type_1])", ) diff --git a/test/orm/test_ac_relationships.py b/test/orm/test_ac_relationships.py index 6a050b698c6..f59d704f3f2 100644 --- a/test/orm/test_ac_relationships.py +++ b/test/orm/test_ac_relationships.py @@ -315,7 +315,7 @@ def test_selectinload(self): "SELECT a_1.id AS a_1_id, b.id AS b_id FROM a AS a_1 " "JOIN (b JOIN d ON d.b_id = b.id JOIN c ON c.id = d.c_id) " "ON a_1.b_id = b.id WHERE a_1.id " - "IN ([POSTCOMPILE_primary_keys])", + "IN (__[POSTCOMPILE_primary_keys])", [{"primary_keys": [1]}], ), ) diff --git a/test/orm/test_deferred.py b/test/orm/test_deferred.py index bfdfb00b7fd..b14313e8bac 100644 --- a/test/orm/test_deferred.py +++ b/test/orm/test_deferred.py @@ -1167,7 +1167,7 @@ def _test_load_only_propagate(self, use_load): expected = [ ( "SELECT users.id AS users_id, users.name AS users_name " - "FROM users WHERE users.id IN ([POSTCOMPILE_id_1])", + "FROM users WHERE users.id IN (__[POSTCOMPILE_id_1])", {"id_1": [7, 8]}, ), ( diff --git a/test/orm/test_deprecations.py b/test/orm/test_deprecations.py index 692a29b3069..e69d145d455 100644 --- a/test/orm/test_deprecations.py +++ b/test/orm/test_deprecations.py @@ -8140,7 +8140,7 @@ def test_aliased_class_vs_nonaliased(self): lambda users: users.select().where(users.c.id.in_([7, 8])), "SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name " "FROM (SELECT users.id AS id, users.name AS name " - "FROM users WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 " + "FROM users WHERE users.id IN (__[POSTCOMPILE_id_1])) AS anon_1 " "WHERE anon_1.name = :name_1", ), ( @@ -8150,14 +8150,14 @@ def 
test_aliased_class_vs_nonaliased(self): "SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name " "AS anon_1_users_name FROM (SELECT users.id AS users_id, " "users.name AS users_name FROM users " - "WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 " + "WHERE users.id IN (__[POSTCOMPILE_id_1])) AS anon_1 " "WHERE anon_1.users_name = :name_1", ), ( lambda User, sess: sess.query(User).where(User.id.in_([7, 8])), "SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name " "FROM (SELECT users.id AS id, users.name AS name " - "FROM users WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 " + "FROM users WHERE users.id IN (__[POSTCOMPILE_id_1])) AS anon_1 " "WHERE anon_1.name = :name_1", ), ) @@ -8671,7 +8671,7 @@ def test_differentiate_self_external(self): "users_1.name AS users_1_name " "FROM users AS users_1, (" "SELECT users.id AS id, users.name AS name FROM users " - "WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 " + "WHERE users.id IN (__[POSTCOMPILE_id_1])) AS anon_1 " "WHERE users_1.id > anon_1.id", check_post_param={"id_1": [7, 8]}, ) @@ -8684,7 +8684,8 @@ def test_differentiate_self_external(self): "SELECT users_1.id AS users_1_id, " "users_1.name AS users_1_name " "FROM (SELECT users.id AS id, users.name AS name " - "FROM users WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 " + "FROM users WHERE users.id IN " + "(__[POSTCOMPILE_id_1])) AS anon_1 " "JOIN users AS users_1 ON users_1.id > anon_1.id", check_post_param={"id_1": [7, 8]}, ) @@ -8697,7 +8698,7 @@ def test_differentiate_self_external(self): "SELECT users_1.id AS users_1_id, " "users_1.name AS users_1_name " "FROM (SELECT users.id AS id, users.name AS name FROM " - "users WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 " + "users WHERE users.id IN (__[POSTCOMPILE_id_1])) AS anon_1 " "JOIN users AS users_1 ON users_1.id > anon_1.id", check_post_param={"id_1": [7, 8]}, ) @@ -8712,7 +8713,7 @@ def test_differentiate_self_external(self): "FROM " "(SELECT users.id AS id, users.name AS name " 
"FROM users WHERE users.id " - "IN ([POSTCOMPILE_id_1])) AS anon_1 " + "IN (__[POSTCOMPILE_id_1])) AS anon_1 " "JOIN users AS users_1 ON users_1.id > anon_1.id", check_post_param={"id_1": [7, 8]}, ) diff --git a/test/orm/test_froms.py b/test/orm/test_froms.py index af3dd8a60f3..6e1c94e12f5 100644 --- a/test/orm/test_froms.py +++ b/test/orm/test_froms.py @@ -2779,7 +2779,7 @@ def test_differentiate_self_external(self): sess.query(User).join(sel, User.id > sel.c.id), "SELECT users.id AS users_id, users.name AS users_name FROM " "users JOIN (SELECT users.id AS id, users.name AS name FROM users " - "WHERE users.id IN ([POSTCOMPILE_id_1])) " + "WHERE users.id IN (__[POSTCOMPILE_id_1])) " "AS anon_1 ON users.id > anon_1.id", ) @@ -2788,7 +2788,7 @@ def test_differentiate_self_external(self): "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name " "FROM users AS users_1, (" "SELECT users.id AS id, users.name AS name FROM users " - "WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 " + "WHERE users.id IN (__[POSTCOMPILE_id_1])) AS anon_1 " "WHERE users_1.id > anon_1.id", check_post_param={"id_1": [7, 8]}, ) @@ -2797,7 +2797,7 @@ def test_differentiate_self_external(self): sess.query(ualias).select_from(ua).join(ualias, ualias.id > ua.id), "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name " "FROM (SELECT users.id AS id, users.name AS name " - "FROM users WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 " + "FROM users WHERE users.id IN (__[POSTCOMPILE_id_1])) AS anon_1 " "JOIN users AS users_1 ON users_1.id > anon_1.id", check_post_param={"id_1": [7, 8]}, ) @@ -2806,7 +2806,7 @@ def test_differentiate_self_external(self): sess.query(ualias).select_from(ua).join(ualias, ualias.id > ua.id), "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name " "FROM (SELECT users.id AS id, users.name AS name FROM " - "users WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 " + "users WHERE users.id IN (__[POSTCOMPILE_id_1])) AS anon_1 " "JOIN users AS users_1 
ON users_1.id > anon_1.id", check_post_param={"id_1": [7, 8]}, ) @@ -2816,7 +2816,7 @@ def test_differentiate_self_external(self): sess.query(salias).join(ualias, ualias.id > salias.id), "SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name FROM " "(SELECT users.id AS id, users.name AS name " - "FROM users WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 " + "FROM users WHERE users.id IN (__[POSTCOMPILE_id_1])) AS anon_1 " "JOIN users AS users_1 ON users_1.id > anon_1.id", check_post_param={"id_1": [7, 8]}, ) diff --git a/test/orm/test_lockmode.py b/test/orm/test_lockmode.py index e073754848e..b296f22409d 100644 --- a/test/orm/test_lockmode.py +++ b/test/orm/test_lockmode.py @@ -345,7 +345,8 @@ def test_for_update_on_inner_w_joinedload_no_render_oracle(self): "FROM (SELECT anon_2.users_id AS users_id, " "anon_2.users_name AS users_name FROM " "(SELECT users.id AS users_id, users.name AS users_name " - "FROM users) anon_2 WHERE ROWNUM <= [POSTCOMPILE_param_1]) anon_1 " + "FROM users) anon_2 WHERE ROWNUM <= " + "__[POSTCOMPILE_param_1]) anon_1 " "LEFT OUTER JOIN addresses addresses_1 " "ON anon_1.users_id = addresses_1.user_id FOR UPDATE", dialect="oracle", diff --git a/test/orm/test_of_type.py b/test/orm/test_of_type.py index bdf7ab85923..09b3cf51c00 100644 --- a/test/orm/test_of_type.py +++ b/test/orm/test_of_type.py @@ -1151,10 +1151,10 @@ class C1(_C): "c.id AS c_id, c.type AS c_type, c.b_id AS c_b_id, a.id AS a_id, " "a.type AS a_type " "FROM a LEFT OUTER JOIN b ON " - "a.id = b.a_id AND b.type IN ([POSTCOMPILE_type_1]) " + "a.id = b.a_id AND b.type IN (__[POSTCOMPILE_type_1]) " "LEFT OUTER JOIN c ON " - "b.id = c.b_id AND c.type IN ([POSTCOMPILE_type_2]) " - "WHERE a.type IN ([POSTCOMPILE_type_3])" + "b.id = c.b_id AND c.type IN (__[POSTCOMPILE_type_2]) " + "WHERE a.type IN (__[POSTCOMPILE_type_3])" ) _query2 = ( @@ -1162,10 +1162,10 @@ class C1(_C): "ccc.id AS ccc_id, ccc.type AS ccc_type, ccc.b_id AS ccc_b_id, " "aaa.id AS aaa_id, aaa.type AS aaa_type " 
"FROM a AS aaa LEFT OUTER JOIN b AS bbb " - "ON aaa.id = bbb.a_id AND bbb.type IN ([POSTCOMPILE_type_1]) " + "ON aaa.id = bbb.a_id AND bbb.type IN (__[POSTCOMPILE_type_1]) " "LEFT OUTER JOIN c AS ccc ON " - "bbb.id = ccc.b_id AND ccc.type IN ([POSTCOMPILE_type_2]) " - "WHERE aaa.type IN ([POSTCOMPILE_type_3])" + "bbb.id = ccc.b_id AND ccc.type IN (__[POSTCOMPILE_type_2]) " + "WHERE aaa.type IN (__[POSTCOMPILE_type_3])" ) _query3 = ( @@ -1173,10 +1173,10 @@ class C1(_C): "c.id AS c_id, c.type AS c_type, c.b_id AS c_b_id, " "aaa.id AS aaa_id, aaa.type AS aaa_type " "FROM a AS aaa LEFT OUTER JOIN b AS bbb " - "ON aaa.id = bbb.a_id AND bbb.type IN ([POSTCOMPILE_type_1]) " + "ON aaa.id = bbb.a_id AND bbb.type IN (__[POSTCOMPILE_type_1]) " "LEFT OUTER JOIN c ON " - "bbb.id = c.b_id AND c.type IN ([POSTCOMPILE_type_2]) " - "WHERE aaa.type IN ([POSTCOMPILE_type_3])" + "bbb.id = c.b_id AND c.type IN (__[POSTCOMPILE_type_2]) " + "WHERE aaa.type IN (__[POSTCOMPILE_type_3])" ) def _test(self, join_of_type, of_type_for_c1, aliased_): diff --git a/test/orm/test_query.py b/test/orm/test_query.py index a4e2ab3fa07..433c11afc90 100644 --- a/test/orm/test_query.py +++ b/test/orm/test_query.py @@ -2044,7 +2044,9 @@ def test_op(self): def test_in(self): User = self.classes.User - self._test(User.id.in_(["a", "b"]), "users.id IN ([POSTCOMPILE_id_1])") + self._test( + User.id.in_(["a", "b"]), "users.id IN (__[POSTCOMPILE_id_1])" + ) def test_in_on_relationship_not_supported(self): User, Address = self.classes.User, self.classes.Address diff --git a/test/orm/test_relationship_criteria.py b/test/orm/test_relationship_criteria.py index 86f7e9fc919..7e2c6e04f9f 100644 --- a/test/orm/test_relationship_criteria.py +++ b/test/orm/test_relationship_criteria.py @@ -367,7 +367,7 @@ def test_select_selectinload_mapper_mapper_criteria( "SELECT addresses.user_id AS addresses_user_id, addresses.id " "AS addresses_id, addresses.email_address " "AS addresses_email_address FROM addresses " - "WHERE 
addresses.user_id IN ([POSTCOMPILE_primary_keys]) " + "WHERE addresses.user_id IN (__[POSTCOMPILE_primary_keys]) " "AND addresses.email_address != :email_address_1 " "ORDER BY addresses.id", [{"primary_keys": [7, 8, 9, 10], "email_address_1": "name"}], @@ -1227,7 +1227,8 @@ def go(value): "SELECT addresses.user_id AS addresses_user_id, " "addresses.id AS addresses_id, addresses.email_address " "AS addresses_email_address FROM addresses " - "WHERE addresses.user_id IN ([POSTCOMPILE_primary_keys]) " + "WHERE addresses.user_id IN " + "(__[POSTCOMPILE_primary_keys]) " "AND addresses.email_address != :email_address_1 " "ORDER BY addresses.id", [ @@ -1304,7 +1305,7 @@ def go(order_description, item_description): "ON items_1.id = order_items_1.item_id " "AND items_1.description = :description_1) " "ON orders.id = order_items_1.order_id " - "WHERE orders.user_id IN ([POSTCOMPILE_primary_keys]) " + "WHERE orders.user_id IN (__[POSTCOMPILE_primary_keys]) " "AND orders.description = :description_2 " "ORDER BY orders.id, items_1.id", [ diff --git a/test/orm/test_relationships.py b/test/orm/test_relationships.py index 94b30f3d01a..98de9abad73 100644 --- a/test/orm/test_relationships.py +++ b/test/orm/test_relationships.py @@ -6513,7 +6513,7 @@ def test_eager_selectin(self): "(SELECT a.id AS aid, b.id AS id FROM a JOIN b ON a.b_ids " "LIKE :id_1 || b.id || :param_1) AS anon_1 " "ON a_1.id = anon_1.aid JOIN b ON b.id = anon_1.id " - "WHERE a_1.id IN ([POSTCOMPILE_primary_keys])", + "WHERE a_1.id IN (__[POSTCOMPILE_primary_keys])", params=[{"id_1": "%", "param_1": "%", "primary_keys": [2]}], ), ) diff --git a/test/orm/test_selectin_relations.py b/test/orm/test_selectin_relations.py index f01060aab50..2add1015ffc 100644 --- a/test/orm/test_selectin_relations.py +++ b/test/orm/test_selectin_relations.py @@ -1840,7 +1840,7 @@ def go(): "paperwork.paperwork_id AS paperwork_paperwork_id, " "paperwork.description AS paperwork_description " "FROM paperwork WHERE paperwork.person_id " - 
"IN ([POSTCOMPILE_primary_keys]) " + "IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY paperwork.paperwork_id", [{"primary_keys": [1]}], ), @@ -1890,7 +1890,7 @@ def go(): "paperwork.paperwork_id AS paperwork_paperwork_id, " "paperwork.description AS paperwork_description " "FROM paperwork WHERE paperwork.person_id " - "IN ([POSTCOMPILE_primary_keys]) " + "IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY paperwork.paperwork_id", [{"primary_keys": [1]}], ), @@ -1936,7 +1936,7 @@ def go(): "paperwork.paperwork_id AS paperwork_paperwork_id, " "paperwork.description AS paperwork_description " "FROM paperwork WHERE paperwork.person_id " - "IN ([POSTCOMPILE_primary_keys]) " + "IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY paperwork.paperwork_id", [{"primary_keys": [1]}], ), @@ -1990,7 +1990,7 @@ def go(): "paperwork.paperwork_id AS paperwork_paperwork_id, " "paperwork.description AS paperwork_description " "FROM paperwork WHERE paperwork.person_id " - "IN ([POSTCOMPILE_primary_keys]) " + "IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY paperwork.paperwork_id", [{"primary_keys": [1]}], ), @@ -2038,7 +2038,7 @@ def go(): "paperwork.paperwork_id AS paperwork_paperwork_id, " "paperwork.description AS paperwork_description " "FROM paperwork WHERE paperwork.person_id " - "IN ([POSTCOMPILE_primary_keys]) " + "IN (__[POSTCOMPILE_primary_keys]) " "ORDER BY paperwork.paperwork_id", [{"primary_keys": [1]}], ), @@ -2259,7 +2259,7 @@ def go(): CompiledSQL( "SELECT b.a_id1 AS b_a_id1, b.a_id2 AS b_a_id2, b.id AS b_id " "FROM b WHERE (b.a_id1, b.a_id2) IN " - "([POSTCOMPILE_primary_keys]) ORDER BY b.id", + "(__[POSTCOMPILE_primary_keys]) ORDER BY b.id", [{"primary_keys": [(i, i + 2) for i in range(1, 20)]}], ), ) @@ -2290,7 +2290,7 @@ def go(): ), CompiledSQL( "SELECT a.id1 AS a_id1, a.id2 AS a_id2 FROM a " - "WHERE (a.id1, a.id2) IN ([POSTCOMPILE_primary_keys])", + "WHERE (a.id1, a.id2) IN (__[POSTCOMPILE_primary_keys])", [{"primary_keys": [(i, i + 2) for i in range(1, 20)]}], ), ) @@ 
-2364,19 +2364,19 @@ def go(): CompiledSQL( "SELECT b.a_id AS b_a_id, b.id AS b_id " "FROM b WHERE b.a_id IN " - "([POSTCOMPILE_primary_keys]) ORDER BY b.id", + "(__[POSTCOMPILE_primary_keys]) ORDER BY b.id", {"primary_keys": list(range(1, 48))}, ), CompiledSQL( "SELECT b.a_id AS b_a_id, b.id AS b_id " "FROM b WHERE b.a_id IN " - "([POSTCOMPILE_primary_keys]) ORDER BY b.id", + "(__[POSTCOMPILE_primary_keys]) ORDER BY b.id", {"primary_keys": list(range(48, 95))}, ), CompiledSQL( "SELECT b.a_id AS b_a_id, b.id AS b_id " "FROM b WHERE b.a_id IN " - "([POSTCOMPILE_primary_keys]) ORDER BY b.id", + "(__[POSTCOMPILE_primary_keys]) ORDER BY b.id", {"primary_keys": list(range(95, 101))}, ), ) @@ -2440,19 +2440,19 @@ def go(): # chunk size is 47. so first chunk are a 1->47... CompiledSQL( "SELECT a.id AS a_id FROM a WHERE a.id IN " - "([POSTCOMPILE_primary_keys])", + "(__[POSTCOMPILE_primary_keys])", {"primary_keys": list(range(1, 48))}, ), # second chunk is a 48-94 CompiledSQL( "SELECT a.id AS a_id FROM a WHERE a.id IN " - "([POSTCOMPILE_primary_keys])", + "(__[POSTCOMPILE_primary_keys])", {"primary_keys": list(range(48, 95))}, ), # third and final chunk 95-100. 
CompiledSQL( "SELECT a.id AS a_id FROM a WHERE a.id IN " - "([POSTCOMPILE_primary_keys])", + "(__[POSTCOMPILE_primary_keys])", {"primary_keys": list(range(95, 101))}, ), ) @@ -2983,13 +2983,13 @@ def test_twolevel_selectin_w_polymorphic(self): "SELECT foo_1.id AS foo_1_id, " "foo_1.type AS foo_1_type, foo_1.foo_id AS foo_1_foo_id " "FROM foo AS foo_1 " - "WHERE foo_1.id IN ([POSTCOMPILE_primary_keys])", + "WHERE foo_1.id IN (__[POSTCOMPILE_primary_keys])", {"primary_keys": [3]}, ), CompiledSQL( "SELECT foo.id AS foo_id_1, foo.type AS foo_type, " "foo.foo_id AS foo_foo_id FROM foo " - "WHERE foo.id IN ([POSTCOMPILE_primary_keys])", + "WHERE foo.id IN (__[POSTCOMPILE_primary_keys])", {"primary_keys": [1]}, ), ) @@ -3153,13 +3153,13 @@ def test_load(self): q.all, CompiledSQL( 'SELECT "user".id AS user_id, "user".type AS user_type ' - 'FROM "user" WHERE "user".type IN ([POSTCOMPILE_type_1])', + 'FROM "user" WHERE "user".type IN (__[POSTCOMPILE_type_1])', {"type_1": ["employer"]}, ), CompiledSQL( "SELECT role.user_id AS role_user_id, role.id AS role_id " "FROM role WHERE role.user_id " - "IN ([POSTCOMPILE_primary_keys])", + "IN (__[POSTCOMPILE_primary_keys])", {"primary_keys": [1]}, ), ) @@ -3277,12 +3277,12 @@ def test_use_join_parent_criteria(self): q.all, CompiledSQL( "SELECT a.id AS a_id, a.b_id AS a_b_id, a.q AS a_q " - "FROM a WHERE a.id IN ([POSTCOMPILE_id_1]) ORDER BY a.id", + "FROM a WHERE a.id IN (__[POSTCOMPILE_id_1]) ORDER BY a.id", [{"id_1": [1, 3]}], ), CompiledSQL( "SELECT b.id AS b_id, b.x AS b_x, b.y AS b_y " - "FROM b WHERE b.id IN ([POSTCOMPILE_primary_keys])", + "FROM b WHERE b.id IN (__[POSTCOMPILE_primary_keys])", [{"primary_keys": [1, 2]}], ), ) @@ -3306,7 +3306,7 @@ def test_use_join_parent_criteria_degrade_on_defer(self): q.all, CompiledSQL( "SELECT a.id AS a_id, a.q AS a_q " - "FROM a WHERE a.id IN ([POSTCOMPILE_id_1]) ORDER BY a.id", + "FROM a WHERE a.id IN (__[POSTCOMPILE_id_1]) ORDER BY a.id", [{"id_1": [1, 3]}], ), # in the very unlikely 
case that the the FK col on parent is @@ -3317,7 +3317,7 @@ def test_use_join_parent_criteria_degrade_on_defer(self): "SELECT a_1.id AS a_1_id, b.id AS b_id, b.x AS b_x, " "b.y AS b_y " "FROM a AS a_1 JOIN b ON b.id = a_1.b_id " - "WHERE a_1.id IN ([POSTCOMPILE_primary_keys])", + "WHERE a_1.id IN (__[POSTCOMPILE_primary_keys])", [{"primary_keys": [1, 3]}], ), ) @@ -3341,7 +3341,7 @@ def test_use_join(self): ), CompiledSQL( "SELECT b.id AS b_id, b.x AS b_x, b.y AS b_y " - "FROM b WHERE b.id IN ([POSTCOMPILE_primary_keys])", + "FROM b WHERE b.id IN (__[POSTCOMPILE_primary_keys])", [{"primary_keys": [1, 2]}], ), ) @@ -3373,7 +3373,7 @@ def test_use_join_omit_join_false(self): CompiledSQL( "SELECT a_1.id AS a_1_id, b.id AS b_id, b.x AS b_x, " "b.y AS b_y FROM a AS a_1 JOIN b ON b.id = a_1.b_id " - "WHERE a_1.id IN ([POSTCOMPILE_primary_keys])", + "WHERE a_1.id IN (__[POSTCOMPILE_primary_keys])", [{"primary_keys": [1, 2, 3, 4, 5]}], ), ) @@ -3408,7 +3408,7 @@ def test_use_join_parent_degrade_on_defer(self): "SELECT a_1.id AS a_1_id, b.id AS b_id, b.x AS b_x, " "b.y AS b_y " "FROM a AS a_1 JOIN b ON b.id = a_1.b_id " - "WHERE a_1.id IN ([POSTCOMPILE_primary_keys])", + "WHERE a_1.id IN (__[POSTCOMPILE_primary_keys])", [{"primary_keys": [1, 2, 3, 4, 5]}], ), ) @@ -3520,13 +3520,15 @@ def test_load_both_wpoly(self): CompiledSQL( "SELECT child_a.parent_id AS child_a_parent_id, " "child_a.id AS child_a_id FROM child_a " - "WHERE child_a.parent_id IN ([POSTCOMPILE_primary_keys])", + "WHERE child_a.parent_id IN " + "(__[POSTCOMPILE_primary_keys])", [{"primary_keys": [1]}], ), CompiledSQL( "SELECT child_b.parent_id AS child_b_parent_id, " "child_b.id AS child_b_id FROM child_b " - "WHERE child_b.parent_id IN ([POSTCOMPILE_primary_keys])", + "WHERE child_b.parent_id IN " + "(__[POSTCOMPILE_primary_keys])", [{"primary_keys": [2]}], ), ), diff --git a/test/sql/test_compiler.py b/test/sql/test_compiler.py index 419d14ce7c6..23cf4eca3ab 100644 --- a/test/sql/test_compiler.py +++ 
b/test/sql/test_compiler.py @@ -4174,7 +4174,7 @@ def test_tuple_expanding_in_no_values(self): ) self.assert_compile( expr, - "(mytable.myid, mytable.name) IN " "([POSTCOMPILE_param_1])", + "(mytable.myid, mytable.name) IN " "(__[POSTCOMPILE_param_1])", checkparams={"param_1": [(1, "foo"), (5, "bar")]}, check_post_param={"param_1": [(1, "foo"), (5, "bar")]}, check_literal_execute={}, @@ -4209,7 +4209,7 @@ def test_tuple_expanding_in_values(self): dialect.tuple_in_values = True self.assert_compile( tuple_(table1.c.myid, table1.c.name).in_([(1, "foo"), (5, "bar")]), - "(mytable.myid, mytable.name) IN " "([POSTCOMPILE_param_1])", + "(mytable.myid, mytable.name) IN " "(__[POSTCOMPILE_param_1])", dialect=dialect, checkparams={"param_1": [(1, "foo"), (5, "bar")]}, check_post_param={"param_1": [(1, "foo"), (5, "bar")]}, @@ -4345,7 +4345,7 @@ def test_expanding_parameter(self): tuple_(table1.c.myid, table1.c.name).in_( bindparam("foo", expanding=True) ), - "(mytable.myid, mytable.name) IN ([POSTCOMPILE_foo])", + "(mytable.myid, mytable.name) IN (__[POSTCOMPILE_foo])", ) dialect = default.DefaultDialect() @@ -4354,13 +4354,13 @@ def test_expanding_parameter(self): tuple_(table1.c.myid, table1.c.name).in_( bindparam("foo", expanding=True) ), - "(mytable.myid, mytable.name) IN ([POSTCOMPILE_foo])", + "(mytable.myid, mytable.name) IN (__[POSTCOMPILE_foo])", dialect=dialect, ) self.assert_compile( table1.c.myid.in_(bindparam("foo", expanding=True)), - "mytable.myid IN ([POSTCOMPILE_foo])", + "mytable.myid IN (__[POSTCOMPILE_foo])", ) def test_limit_offset_select_literal_binds(self): @@ -4421,7 +4421,7 @@ class Compiler(compiler.StrSQLCompiler): ( "one", select(literal("someliteral")), - "SELECT [POSTCOMPILE_param_1] AS anon_1", + "SELECT __[POSTCOMPILE_param_1] AS anon_1", dict( check_literal_execute={"param_1": "someliteral"}, check_post_param={}, @@ -4430,14 +4430,14 @@ class Compiler(compiler.StrSQLCompiler): ( "two", select(table1.c.myid + 3), - "SELECT mytable.myid + 
[POSTCOMPILE_myid_1] " + "SELECT mytable.myid + __[POSTCOMPILE_myid_1] " "AS anon_1 FROM mytable", dict(check_literal_execute={"myid_1": 3}, check_post_param={}), ), ( "three", select(table1.c.myid.in_([4, 5, 6])), - "SELECT mytable.myid IN ([POSTCOMPILE_myid_1]) " + "SELECT mytable.myid IN (__[POSTCOMPILE_myid_1]) " "AS anon_1 FROM mytable", dict( check_literal_execute={"myid_1": [4, 5, 6]}, @@ -4447,14 +4447,14 @@ class Compiler(compiler.StrSQLCompiler): ( "four", select(func.mod(table1.c.myid, 5)), - "SELECT mod(mytable.myid, [POSTCOMPILE_mod_2]) " + "SELECT mod(mytable.myid, __[POSTCOMPILE_mod_2]) " "AS mod_1 FROM mytable", dict(check_literal_execute={"mod_2": 5}, check_post_param={}), ), ( "five", select(literal("foo").in_([])), - "SELECT [POSTCOMPILE_param_1] IN ([POSTCOMPILE_param_2]) " + "SELECT __[POSTCOMPILE_param_1] IN (__[POSTCOMPILE_param_2]) " "AS anon_1", dict( check_literal_execute={"param_1": "foo", "param_2": []}, @@ -4464,7 +4464,7 @@ class Compiler(compiler.StrSQLCompiler): ( "six", select(literal(util.b("foo"))), - "SELECT [POSTCOMPILE_param_1] AS anon_1", + "SELECT __[POSTCOMPILE_param_1] AS anon_1", dict( check_literal_execute={"param_1": util.b("foo")}, check_post_param={}, @@ -4473,7 +4473,7 @@ class Compiler(compiler.StrSQLCompiler): ( "seven", select(table1.c.myid == bindparam("foo", callable_=lambda: 5)), - "SELECT mytable.myid = [POSTCOMPILE_foo] AS anon_1 FROM mytable", + "SELECT mytable.myid = __[POSTCOMPILE_foo] AS anon_1 FROM mytable", dict(check_literal_execute={"foo": 5}, check_post_param={}), ), argnames="stmt, expected, kw", @@ -4495,7 +4495,7 @@ def test_render_literal_execute_parameter(self): table1.c.myid == bindparam("foo", 5, literal_execute=True) ), "SELECT mytable.myid FROM mytable " - "WHERE mytable.myid = [POSTCOMPILE_foo]", + "WHERE mytable.myid = __[POSTCOMPILE_foo]", ) def test_render_literal_execute_parameter_literal_binds(self): @@ -4540,7 +4540,7 @@ def test_render_expanding_parameter(self): 
table1.c.myid.in_(bindparam("foo", expanding=True)) ), "SELECT mytable.myid FROM mytable " - "WHERE mytable.myid IN ([POSTCOMPILE_foo])", + "WHERE mytable.myid IN (__[POSTCOMPILE_foo])", ) def test_render_expanding_parameter_literal_binds(self): @@ -5122,7 +5122,7 @@ def test_schema_translate_map_table(self): self.assert_compile( schema.CreateTable(t1), - "CREATE TABLE [SCHEMA__none].t1 (q INTEGER)", + "CREATE TABLE __[SCHEMA__none].t1 (q INTEGER)", schema_translate_map=schema_translate_map, ) self.assert_compile( @@ -5134,7 +5134,7 @@ def test_schema_translate_map_table(self): self.assert_compile( schema.CreateTable(t2), - "CREATE TABLE [SCHEMA_foo].t2 (q INTEGER)", + "CREATE TABLE __[SCHEMA_foo].t2 (q INTEGER)", schema_translate_map=schema_translate_map, ) self.assert_compile( @@ -5146,7 +5146,7 @@ def test_schema_translate_map_table(self): self.assert_compile( schema.CreateTable(t3), - "CREATE TABLE [SCHEMA_bar].t3 (q INTEGER)", + "CREATE TABLE __[SCHEMA_bar].t3 (q INTEGER)", schema_translate_map=schema_translate_map, ) self.assert_compile( @@ -5167,7 +5167,7 @@ def test_schema_translate_map_special_chars(self): self.assert_compile( schema.CreateTable(t1), - "CREATE TABLE [SCHEMA__none].t1 (q INTEGER)", + "CREATE TABLE __[SCHEMA__none].t1 (q INTEGER)", schema_translate_map=schema_translate_map, ) self.assert_compile( @@ -5179,7 +5179,7 @@ def test_schema_translate_map_special_chars(self): self.assert_compile( schema.CreateTable(t2), - "CREATE TABLE [SCHEMA_foo % ^ #].t2 (q INTEGER)", + "CREATE TABLE __[SCHEMA_foo % ^ #].t2 (q INTEGER)", schema_translate_map=schema_translate_map, ) self.assert_compile( @@ -5191,7 +5191,7 @@ def test_schema_translate_map_special_chars(self): self.assert_compile( schema.CreateTable(t3), - "CREATE TABLE [SCHEMA_bar {}].t3 (q INTEGER)", + "CREATE TABLE __[SCHEMA_bar {}].t3 (q INTEGER)", schema_translate_map=schema_translate_map, ) self.assert_compile( @@ -5236,39 +5236,39 @@ def test_schema_translate_map_sequence(self): 
self.assert_compile( schema.CreateSequence(s1), - "CREATE SEQUENCE [SCHEMA__none].s1 START WITH 1", + "CREATE SEQUENCE __[SCHEMA__none].s1 START WITH 1", schema_translate_map=schema_translate_map, ) self.assert_compile( s1.next_value(), - "", + "", schema_translate_map=schema_translate_map, dialect="default_enhanced", ) self.assert_compile( schema.CreateSequence(s2), - "CREATE SEQUENCE [SCHEMA_foo].s2 START WITH 1", + "CREATE SEQUENCE __[SCHEMA_foo].s2 START WITH 1", schema_translate_map=schema_translate_map, ) self.assert_compile( s2.next_value(), - "", + "", schema_translate_map=schema_translate_map, dialect="default_enhanced", ) self.assert_compile( schema.CreateSequence(s3), - "CREATE SEQUENCE [SCHEMA_bar].s3 START WITH 1", + "CREATE SEQUENCE __[SCHEMA_bar].s3 START WITH 1", schema_translate_map=schema_translate_map, ) self.assert_compile( s3.next_value(), - "", + "", schema_translate_map=schema_translate_map, dialect="default_enhanced", ) @@ -5306,24 +5306,24 @@ def test_schema_translate_map_sequence_server_default(self): self.assert_compile( schema.CreateTable(t1), - "CREATE TABLE [SCHEMA__none].t1 " - "(id INTEGER DEFAULT " + "CREATE TABLE __[SCHEMA__none].t1 " + "(id INTEGER DEFAULT " "NOT NULL, PRIMARY KEY (id))", schema_translate_map=schema_translate_map, dialect="default_enhanced", ) self.assert_compile( schema.CreateTable(t2), - "CREATE TABLE [SCHEMA__none].t2 " - "(id INTEGER DEFAULT " + "CREATE TABLE __[SCHEMA__none].t2 " + "(id INTEGER DEFAULT " "NOT NULL, PRIMARY KEY (id))", schema_translate_map=schema_translate_map, dialect="default_enhanced", ) self.assert_compile( schema.CreateTable(t3), - "CREATE TABLE [SCHEMA__none].t3 " - "(id INTEGER DEFAULT " + "CREATE TABLE __[SCHEMA__none].t3 " + "(id INTEGER DEFAULT " "NOT NULL, PRIMARY KEY (id))", schema_translate_map=schema_translate_map, dialect="default_enhanced", @@ -5517,12 +5517,12 @@ def test_schema_translate_aliases(self): self.assert_compile( stmt, - "SELECT [SCHEMA__none].myothertable.otherid, 
" - "[SCHEMA__none].myothertable.othername, " + "SELECT __[SCHEMA__none].myothertable.otherid, " + "__[SCHEMA__none].myothertable.othername, " "mytable_1.myid, mytable_1.name, mytable_1.description " - "FROM [SCHEMA__none].myothertable JOIN " - "[SCHEMA__none].mytable AS mytable_1 " - "ON [SCHEMA__none].myothertable.otherid = mytable_1.myid " + "FROM __[SCHEMA__none].myothertable JOIN " + "__[SCHEMA__none].mytable AS mytable_1 " + "ON __[SCHEMA__none].myothertable.otherid = mytable_1.myid " "WHERE mytable_1.name = :name_1", schema_translate_map=schema_translate_map, ) diff --git a/test/sql/test_deprecations.py b/test/sql/test_deprecations.py index 9b74ab1fa65..426eb16aed2 100644 --- a/test/sql/test_deprecations.py +++ b/test/sql/test_deprecations.py @@ -2465,7 +2465,7 @@ def test_issue_5429_compile(self): self.assert_compile( column("x").notin_(["foo", "bar"]), - "(x NOT IN ([POSTCOMPILE_x_1]))", + "(x NOT IN (__[POSTCOMPILE_x_1]))", ) def test_issue_5429_operators(self): diff --git a/test/sql/test_external_traversal.py b/test/sql/test_external_traversal.py index 0d43448d5ed..e01ec0738e9 100644 --- a/test/sql/test_external_traversal.py +++ b/test/sql/test_external_traversal.py @@ -206,7 +206,7 @@ def test_bindparam_key_proc_for_copies(self, meth, name): and the compiler postcompile reg is:: - re.sub(r"\[POSTCOMPILE_(\S+)\]", process_expanding, self.string) + re.sub(r"\__[POSTCOMPILE_(\S+)\]", process_expanding, self.string) Interestingly, brackets in the name seems to work out. 
@@ -241,7 +241,7 @@ def test_expanding_in_bindparam_safe_to_clone(self): stmt = and_(expr, expr2) self.assert_compile( - stmt, "x IN ([POSTCOMPILE_x_1]) AND x IN ([POSTCOMPILE_x_1])" + stmt, "x IN (__[POSTCOMPILE_x_1]) AND x IN (__[POSTCOMPILE_x_1])" ) self.assert_compile( stmt, "x IN (1, 2, 3) AND x IN (1, 2, 3)", literal_binds=True diff --git a/test/sql/test_lambdas.py b/test/sql/test_lambdas.py index a53401a4f10..a2aa9705cb1 100644 --- a/test/sql/test_lambdas.py +++ b/test/sql/test_lambdas.py @@ -152,7 +152,7 @@ def go(val): asserter_.assert_( CompiledSQL( "SELECT users.id FROM users WHERE users.name " - "IN ([POSTCOMPILE_val_1]) ORDER BY users.id", + "IN (__[POSTCOMPILE_val_1]) ORDER BY users.id", params={"val_1": case}, ) ) @@ -1130,7 +1130,7 @@ def go(): def test_in_parameters_one(self): expr1 = select(1).where(column("q").in_(["a", "b", "c"])) - self.assert_compile(expr1, "SELECT 1 WHERE q IN ([POSTCOMPILE_q_1])") + self.assert_compile(expr1, "SELECT 1 WHERE q IN (__[POSTCOMPILE_q_1])") self.assert_compile( expr1, @@ -1141,7 +1141,7 @@ def test_in_parameters_one(self): def test_in_parameters_two(self): expr2 = select(1).where(lambda: column("q").in_(["a", "b", "c"])) - self.assert_compile(expr2, "SELECT 1 WHERE q IN ([POSTCOMPILE_q_1])") + self.assert_compile(expr2, "SELECT 1 WHERE q IN (__[POSTCOMPILE_q_1])") self.assert_compile( expr2, "SELECT 1 WHERE q IN (:q_1_1, :q_1_2, :q_1_3)", @@ -1153,7 +1153,7 @@ def test_in_parameters_three(self): expr3 = lambdas.lambda_stmt( lambda: select(1).where(column("q").in_(["a", "b", "c"])) ) - self.assert_compile(expr3, "SELECT 1 WHERE q IN ([POSTCOMPILE_q_1])") + self.assert_compile(expr3, "SELECT 1 WHERE q IN (__[POSTCOMPILE_q_1])") self.assert_compile( expr3, "SELECT 1 WHERE q IN (:q_1_1, :q_1_2, :q_1_3)", @@ -1169,7 +1169,7 @@ def go(names): expr4 = go(["a", "b", "c"]) self.assert_compile( - expr4, "SELECT 1 WHERE q IN ([POSTCOMPILE_names_1])" + expr4, "SELECT 1 WHERE q IN (__[POSTCOMPILE_names_1])" ) 
self.assert_compile( expr4, @@ -1821,7 +1821,7 @@ def test_detect_change_in_binds_tracking_negative(self): opts=lambdas.LambdaOptions(track_closure_variables=False), ) - self.assert_compile(elem.expr, "t1.q IN ([POSTCOMPILE_vv_1])") + self.assert_compile(elem.expr, "t1.q IN (__[POSTCOMPILE_vv_1])") assert_raises_message( exc.InvalidRequestError, diff --git a/test/sql/test_operators.py b/test/sql/test_operators.py index 79aa4d79452..0e6f4f2d96c 100644 --- a/test/sql/test_operators.py +++ b/test/sql/test_operators.py @@ -1415,14 +1415,14 @@ def test_operator_precedence_5(self): self.assert_compile( self.table2.select().where(5 + self.table2.c.field.in_([5, 6])), "SELECT op.field FROM op WHERE :param_1 + " - "(op.field IN ([POSTCOMPILE_field_1]))", + "(op.field IN (__[POSTCOMPILE_field_1]))", ) def test_operator_precedence_6(self): self.assert_compile( self.table2.select().where((5 + self.table2.c.field).in_([5, 6])), "SELECT op.field FROM op WHERE :field_1 + op.field " - "IN ([POSTCOMPILE_param_1])", + "IN (__[POSTCOMPILE_param_1])", ) def test_operator_precedence_7(self): @@ -1766,28 +1766,28 @@ class InTest(fixtures.TestBase, testing.AssertsCompiledSQL): def test_in_1(self): self.assert_compile( self.table1.c.myid.in_(["a"]), - "mytable.myid IN ([POSTCOMPILE_myid_1])", + "mytable.myid IN (__[POSTCOMPILE_myid_1])", checkparams={"myid_1": ["a"]}, ) def test_in_2(self): self.assert_compile( ~self.table1.c.myid.in_(["a"]), - "(mytable.myid NOT IN ([POSTCOMPILE_myid_1]))", + "(mytable.myid NOT IN (__[POSTCOMPILE_myid_1]))", checkparams={"myid_1": ["a"]}, ) def test_in_3(self): self.assert_compile( self.table1.c.myid.in_(["a", "b"]), - "mytable.myid IN ([POSTCOMPILE_myid_1])", + "mytable.myid IN (__[POSTCOMPILE_myid_1])", checkparams={"myid_1": ["a", "b"]}, ) def test_in_4(self): self.assert_compile( self.table1.c.myid.in_(iter(["a", "b"])), - "mytable.myid IN ([POSTCOMPILE_myid_1])", + "mytable.myid IN (__[POSTCOMPILE_myid_1])", checkparams={"myid_1": ["a", "b"]}, ) @@ 
-1882,7 +1882,7 @@ def test_in_18(self): def test_in_19(self): self.assert_compile( self.table1.c.myid.in_([1, 2, 3]), - "mytable.myid IN ([POSTCOMPILE_myid_1])", + "mytable.myid IN (__[POSTCOMPILE_myid_1])", checkparams={"myid_1": [1, 2, 3]}, ) @@ -1989,7 +1989,7 @@ def test_in_29(self, is_in): if is_in: self.assert_compile( expr, - "(a, b, c) %s ([POSTCOMPILE_param_1])" + "(a, b, c) %s (__[POSTCOMPILE_param_1])" % ("IN" if is_in else "NOT IN"), checkparams={"param_1": [(3, "hi", b"there"), (4, "Q", b"P")]}, ) @@ -2002,7 +2002,7 @@ def test_in_29(self, is_in): else: self.assert_compile( expr, - "((a, b, c) NOT IN ([POSTCOMPILE_param_1]))", + "((a, b, c) NOT IN (__[POSTCOMPILE_param_1]))", checkparams={"param_1": [(3, "hi", b"there"), (4, "Q", b"P")]}, ) self.assert_compile( @@ -2029,7 +2029,7 @@ def test_in_empty_tuple(self, is_in, negate): if is_in: self.assert_compile( expr, - "(a, b, c) IN ([POSTCOMPILE_param_1])", + "(a, b, c) IN (__[POSTCOMPILE_param_1])", checkparams={"param_1": []}, ) self.assert_compile( @@ -2041,7 +2041,7 @@ def test_in_empty_tuple(self, is_in, negate): else: self.assert_compile( expr, - "((a, b, c) NOT IN ([POSTCOMPILE_param_1]))", + "((a, b, c) NOT IN (__[POSTCOMPILE_param_1]))", checkparams={"param_1": []}, ) self.assert_compile( @@ -2064,7 +2064,7 @@ def test_in_empty_single(self, is_in, negate): if is_in: self.assert_compile( expr, - "a IN ([POSTCOMPILE_a_1])", + "a IN (__[POSTCOMPILE_a_1])", checkparams={"a_1": []}, ) self.assert_compile( @@ -2076,7 +2076,7 @@ def test_in_empty_single(self, is_in, negate): else: self.assert_compile( expr, - "(a NOT IN ([POSTCOMPILE_a_1]))", + "(a NOT IN (__[POSTCOMPILE_a_1]))", checkparams={"a_1": []}, ) self.assert_compile( @@ -2094,7 +2094,8 @@ def test_in_self_plus_negated(self): stmt = and_(expr1, expr2) self.assert_compile( - stmt, "a IN ([POSTCOMPILE_a_1]) AND (a NOT IN ([POSTCOMPILE_a_2]))" + stmt, + "a IN (__[POSTCOMPILE_a_1]) AND (a NOT IN (__[POSTCOMPILE_a_2]))", ) self.assert_compile( 
stmt, "a IN (5) AND (a NOT IN (5))", literal_binds=True @@ -2108,7 +2109,8 @@ def test_in_self_plus_negated_empty(self): stmt = and_(expr1, expr2) self.assert_compile( - stmt, "a IN ([POSTCOMPILE_a_1]) AND (a NOT IN ([POSTCOMPILE_a_2]))" + stmt, + "a IN (__[POSTCOMPILE_a_1]) AND (a NOT IN (__[POSTCOMPILE_a_2]))", ) self.assert_compile( stmt, @@ -2120,7 +2122,7 @@ def test_in_set(self): s = {1, 2, 3} self.assert_compile( self.table1.c.myid.in_(s), - "mytable.myid IN ([POSTCOMPILE_myid_1])", + "mytable.myid IN (__[POSTCOMPILE_myid_1])", checkparams={"myid_1": list(s)}, ) @@ -2138,7 +2140,7 @@ def __iter__(self): seq = MySeq([1, 2, 3]) self.assert_compile( self.table1.c.myid.in_(seq), - "mytable.myid IN ([POSTCOMPILE_myid_1])", + "mytable.myid IN (__[POSTCOMPILE_myid_1])", checkparams={"myid_1": [1, 2, 3]}, ) diff --git a/test/sql/test_selectable.py b/test/sql/test_selectable.py index e68e98a3ccb..f0df92b7051 100644 --- a/test/sql/test_selectable.py +++ b/test/sql/test_selectable.py @@ -2948,7 +2948,7 @@ def test_annotate_expressions(self): (table1.c.col1 == 5, "table1.col1 = :col1_1"), ( table1.c.col1.in_([2, 3, 4]), - "table1.col1 IN ([POSTCOMPILE_col1_1])", + "table1.col1 IN (__[POSTCOMPILE_col1_1])", ), ]: eq_(str(expr), expected) diff --git a/test/sql/test_type_expressions.py b/test/sql/test_type_expressions.py index adcaef39cb4..e0e0858a450 100644 --- a/test/sql/test_type_expressions.py +++ b/test/sql/test_type_expressions.py @@ -191,7 +191,7 @@ def test_in_binds(self): ), "SELECT test_table.x, lower(test_table.y) AS y FROM " "test_table WHERE test_table.y IN " - "([POSTCOMPILE_y_1~~lower(~~REPL~~)~~])", + "(__[POSTCOMPILE_y_1~~lower(~~REPL~~)~~])", render_postcompile=False, ) From 0bb2076a90eddb097c1fb934947d8cbce6c11fb3 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 10 Nov 2021 09:52:18 -0500 Subject: [PATCH 022/632] qualify asyncpg API tests for python 3.8 Getting TypeError: object MagicMock can't be used in 'await' expression for Python 3.7 and 
earlier. this test is not needed on all platforms it's confirming that two methods are present. Change-Id: If918add023c98c062ea0c1cd132a999647a2d35f (cherry picked from commit 2c41cd99d8a4d8bb27c975f5e2511ab2bf261110) --- lib/sqlalchemy/testing/requirements.py | 6 ++++++ test/dialect/postgresql/test_async_pg_py3k.py | 1 + 2 files changed, 7 insertions(+) diff --git a/lib/sqlalchemy/testing/requirements.py b/lib/sqlalchemy/testing/requirements.py index e6f669e4c33..a0f262a760a 100644 --- a/lib/sqlalchemy/testing/requirements.py +++ b/lib/sqlalchemy/testing/requirements.py @@ -1262,6 +1262,12 @@ def python37(self): def dataclasses(self): return self.python37 + @property + def python38(self): + return exclusions.only_if( + lambda: util.py38, "Python 3.8 or above required" + ) + @property def cpython(self): return exclusions.only_if( diff --git a/test/dialect/postgresql/test_async_pg_py3k.py b/test/dialect/postgresql/test_async_pg_py3k.py index 12917e97663..782cf33dd39 100644 --- a/test/dialect/postgresql/test_async_pg_py3k.py +++ b/test/dialect/postgresql/test_async_pg_py3k.py @@ -258,6 +258,7 @@ async def test_failed_rollback_recover( "setup_asyncpg_jsonb_codec", argnames="methname", ) + @testing.requires.python38 @async_test async def test_codec_registration( self, metadata, async_testing_engine, methname From 36420103f8dddae0c23a945eed5daf8bf1952f61 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 11 Nov 2021 10:42:08 -0500 Subject: [PATCH 023/632] - 1.4.27 --- doc/build/changelog/changelog_14.rst | 236 ++++++++++++++++++++- doc/build/changelog/unreleased_14/6023.rst | 6 - doc/build/changelog/unreleased_14/7167.rst | 11 - doc/build/changelog/unreleased_14/7224.rst | 15 -- doc/build/changelog/unreleased_14/7239.rst | 7 - doc/build/changelog/unreleased_14/7244.rst | 10 - doc/build/changelog/unreleased_14/7269.rst | 11 - doc/build/changelog/unreleased_14/7272.rst | 14 -- doc/build/changelog/unreleased_14/7274.rst | 37 ---- 
doc/build/changelog/unreleased_14/7281.rst | 8 - doc/build/changelog/unreleased_14/7283.rst | 9 - doc/build/changelog/unreleased_14/7284.rst | 13 -- doc/build/changelog/unreleased_14/7287.rst | 12 -- doc/build/changelog/unreleased_14/7291.rst | 8 - doc/build/changelog/unreleased_14/7292.rst | 20 -- doc/build/changelog/unreleased_14/7295.rst | 9 - doc/build/changelog/unreleased_14/7300.rst | 17 -- doc/build/changelog/unreleased_14/7304.rst | 10 - doc/build/conf.py | 4 +- 19 files changed, 237 insertions(+), 220 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/6023.rst delete mode 100644 doc/build/changelog/unreleased_14/7167.rst delete mode 100644 doc/build/changelog/unreleased_14/7224.rst delete mode 100644 doc/build/changelog/unreleased_14/7239.rst delete mode 100644 doc/build/changelog/unreleased_14/7244.rst delete mode 100644 doc/build/changelog/unreleased_14/7269.rst delete mode 100644 doc/build/changelog/unreleased_14/7272.rst delete mode 100644 doc/build/changelog/unreleased_14/7274.rst delete mode 100644 doc/build/changelog/unreleased_14/7281.rst delete mode 100644 doc/build/changelog/unreleased_14/7283.rst delete mode 100644 doc/build/changelog/unreleased_14/7284.rst delete mode 100644 doc/build/changelog/unreleased_14/7287.rst delete mode 100644 doc/build/changelog/unreleased_14/7291.rst delete mode 100644 doc/build/changelog/unreleased_14/7292.rst delete mode 100644 doc/build/changelog/unreleased_14/7295.rst delete mode 100644 doc/build/changelog/unreleased_14/7300.rst delete mode 100644 doc/build/changelog/unreleased_14/7304.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index a99a2d13f8c..3d7d4c37c62 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,241 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.27 - :include_notes_from: unreleased_14 + :released: November 11, 2021 + + .. 
change:: + :tags: bug, engine + :tickets: 7291 + + Fixed issue in future :class:`_future.Connection` object where the + :meth:`_future.Connection.execute` method would not accept a non-dict + mapping object, such as SQLAlchemy's own :class:`.RowMapping` or other + ``abc.collections.Mapping`` object as a parameter dictionary. + + .. change:: + :tags: bug, mysql, mariadb + :tickets: 7167 + + Reorganized the list of reserved words into two separate lists, one for + MySQL and one for MariaDB, so that these diverging sets of words can be + managed more accurately; adjusted the MySQL/MariaDB dialect to switch among + these lists based on either explicitly configured or + server-version-detected "MySQL" or "MariaDB" backend. Added all current + reserved words through MySQL 8 and current MariaDB versions including + recently added keywords like "lead" . Pull request courtesy Kevin Kirsche. + + .. change:: + :tags: bug, orm + :tickets: 7224 + + Fixed bug in "relationship to aliased class" feature introduced at + :ref:`relationship_aliased_class` where it was not possible to create a + loader strategy option targeting an attribute on the target using the + :func:`_orm.aliased` construct directly in a second loader option, such as + ``selectinload(A.aliased_bs).joinedload(aliased_b.cs)``, without explicitly + qualifying using :meth:`_orm.PropComparator.of_type` on the preceding + element of the path. Additionally, targeting the non-aliased class directly + would be accepted (inappropriately), but would silently fail, such as + ``selectinload(A.aliased_bs).joinedload(B.cs)``; this now raises an error + referring to the typing mismatch. + + + .. change:: + :tags: bug, schema + :tickets: 7295 + + Fixed issue in :class:`.Table` where the + :paramref:`.Table.implicit_returning` parameter would not be + accommodated correctly when passed along with + :paramref:`.Table.extend_existing` to augment an existing + :class:`.Table`. + + .. 
change:: + :tags: bug, postgresql, asyncpg + :tickets: 7283 + + Changed the asyncpg dialect to bind the :class:`.Float` type to the "float" + PostgreSQL type instead of "numeric" so that the value ``float(inf)`` can + be accommodated. Added test suite support for persistence of the "inf" + value. + + + .. change:: + :tags: bug, engine, regression + :tickets: 7274 + :versions: 2.0.0b1 + + Fixed regression where the :meth:`_engine.CursorResult.fetchmany` method + would fail to autoclose a server-side cursor (i.e. when ``stream_results`` + or ``yield_per`` is in use, either Core or ORM oriented results) when the + results were fully exhausted. + + .. change:: + :tags: bug, orm + :tickets: 7274 + :versions: 2.0.0b1 + + All :class:`_result.Result` objects will now consistently raise + :class:`_exc.ResourceClosedError` if they are used after a hard close, + which includes the "hard close" that occurs after calling "single row or + value" methods like :meth:`_result.Result.first` and + :meth:`_result.Result.scalar`. This was already the behavior of the most + common class of result objects returned for Core statement executions, i.e. + those based on :class:`_engine.CursorResult`, so this behavior is not new. + However, the change has been extended to properly accommodate for the ORM + "filtering" result objects returned when using 2.0 style ORM queries, + which would previously behave in "soft closed" style of returning empty + results, or wouldn't actually "soft close" at all and would continue + yielding from the underlying cursor. 
+ + As part of this change, also added :meth:`_result.Result.close` to the base + :class:`_result.Result` class and implemented it for the filtered result + implementations that are used by the ORM, so that it is possible to call + the :meth:`_engine.CursorResult.close` method on the underlying + :class:`_engine.CursorResult` when the the ``yield_per`` execution option + is in use to close a server side cursor before remaining ORM results have + been fetched. This was again already available for Core result sets but the + change makes it available for 2.0 style ORM results as well. + + + .. change:: + :tags: bug, mysql + :tickets: 7281 + :versions: 2.0.0b1 + + Fixed issue in MySQL :meth:`_mysql.Insert.on_duplicate_key_update` which + would render the wrong column name when an expression were used in a VALUES + expression. Pull request courtesy Cristian Sabaila. + + .. change:: + :tags: bug, sql, regression + :tickets: 7292 + + Fixed regression where the row objects returned for ORM queries, which are + now the normal :class:`_sql.Row` objects, would not be interpreted by the + :meth:`_sql.ColumnOperators.in_` operator as tuple values to be broken out + into individual bound parameters, and would instead pass them as single + values to the driver leading to failures. The change to the "expanding IN" + system now accommodates for the expression already being of type + :class:`.TupleType` and treats values accordingly if so. In the uncommon + case of using "tuple-in" with an untyped statement such as a textual + statement with no typing information, a tuple value is detected for values + that implement ``collections.abc.Sequence``, but that are not ``str`` or + ``bytes``, as always when testing for ``Sequence``. + + .. change:: + :tags: usecase, sql + + Added :class:`.TupleType` to the top level ``sqlalchemy`` import namespace. + + .. 
change:: + :tags: bug, sql + :tickets: 7269 + + Fixed issue where using the feature of using a string label for ordering or + grouping described at :ref:`tutorial_order_by_label` would fail to function + correctly if used on a :class:`.CTE` construct, when the CTE were embedded + inside of an enclosing :class:`_sql.Select` statement that itself was set + up as a scalar subquery. + + + + .. change:: + :tags: bug, orm, regression + :tickets: 7239 + + Fixed 1.4 regression where :meth:`_orm.Query.filter_by` would not function + correctly on a :class:`_orm.Query` that was produced from + :meth:`_orm.Query.union`, :meth:`_orm.Query.from_self` or similar. + + .. change:: + :tags: bug, orm + :tickets: 7304 + + Fixed issue where deferred polymorphic loading of attributes from a + joined-table inheritance subclass would fail to populate the attribute + correctly if the :func:`_orm.load_only` option were used to originally + exclude that attribute, in the case where the load_only were descending + from a relationship loader option. The fix allows that other valid options + such as ``defer(..., raiseload=True)`` etc. still function as expected. + + .. change:: + :tags: postgresql, usecase, asyncpg + :tickets: 7284 + :versions: 2.0.0b1 + + Added overridable methods ``PGDialect_asyncpg.setup_asyncpg_json_codec`` + and ``PGDialect_asyncpg.setup_asyncpg_jsonb_codec`` codec, which handle the + required task of registering JSON/JSONB codecs for these datatypes when + using asyncpg. The change is that methods are broken out as individual, + overridable methods to support third party dialects that need to alter or + disable how these particular codecs are set up. + + + + .. 
change:: + :tags: bug, engine + :tickets: 7272 + :versions: 2.0.0b1 + + Fixed issue in future :class:`_future.Engine` where calling upon + :meth:`_future.Engine.begin` and entering the context manager would not + close the connection if the actual BEGIN operation failed for some reason, + such as an event handler raising an exception; this use case failed to be + tested for the future version of the engine. Note that the "future" context + managers which handle ``begin()`` blocks in Core and ORM don't actually run + the "BEGIN" operation until the context managers are actually entered. This + is different from the legacy version which runs the "BEGIN" operation up + front. + + .. change:: + :tags: mssql, bug + :tickets: 7300 + + Adjusted the compiler's generation of "post compile" symbols including + those used for "expanding IN" as well as for the "schema translate map" to + not be based directly on plain bracketed strings with underscores, as this + conflicts directly with SQL Server's quoting format of also using brackets, + which produces false matches when the compiler replaces "post compile" and + "schema translate" symbols. The issue created easy to reproduce examples + both with the :meth:`.Inspector.get_schema_names` method when used in + conjunction with the + :paramref:`_engine.Connection.execution_options.schema_translate_map` + feature, as well in the unlikely case that a symbol overlapping with the + internal name "POSTCOMPILE" would be used with a feature like "expanding + in". + + + .. change:: + :tags: postgresql, pg8000 + :tickets: 7167 + + Improve array handling when using PostgreSQL with the + pg8000 dialect. + + .. change:: + :tags: bug, orm, regression + :tickets: 7244 + + Fixed 1.4 regression where :meth:`_orm.Query.filter_by` would not function + correctly when :meth:`_orm.Query.join` were joined to an entity which made + use of :meth:`_orm.PropComparator.of_type` to specify an aliased version of + the target entity. 
The issue also applies to future style ORM queries + constructed with :func:`_sql.select`. + + + .. change:: + :tags: bug, sql, regression + :tickets: 7287 + + Fixed regression where the :func:`_sql.text` construct would no longer be + accepted as a target case in the "whens" list within a :func:`_sql.case` + construct. The regression appears related to an attempt to guard against + some forms of literal values that were considered to be ambiguous when + passed here; however, there's no reason the target cases shouldn't be + interpreted as open-ended SQL expressions just like anywhere else, and a + literal string or tuple will be converted to a bound parameter as would be + the case elsewhere. .. changelog:: :version: 1.4.26 diff --git a/doc/build/changelog/unreleased_14/6023.rst b/doc/build/changelog/unreleased_14/6023.rst deleted file mode 100644 index 88d9777ba51..00000000000 --- a/doc/build/changelog/unreleased_14/6023.rst +++ /dev/null @@ -1,6 +0,0 @@ -.. change:: - :tags: postgresql, pg8000 - :tickets: 7167 - - Improve array handling when using PostgreSQL with the - pg8000 dialect. diff --git a/doc/build/changelog/unreleased_14/7167.rst b/doc/build/changelog/unreleased_14/7167.rst deleted file mode 100644 index aedc8086c01..00000000000 --- a/doc/build/changelog/unreleased_14/7167.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. change:: - :tags: bug, mysql, mariadb - :tickets: 7167 - - Reorganized the list of reserved words into two separate lists, one for - MySQL and one for MariaDB, so that these diverging sets of words can be - managed more accurately; adjusted the MySQL/MariaDB dialect to switch among - these lists based on either explicitly configured or - server-version-detected "MySQL" or "MariaDB" backend. Added all current - reserved words through MySQL 8 and current MariaDB versions including - recently added keywords like "lead" . Pull request courtesy Kevin Kirsche. 
diff --git a/doc/build/changelog/unreleased_14/7224.rst b/doc/build/changelog/unreleased_14/7224.rst deleted file mode 100644 index 3f10a60883d..00000000000 --- a/doc/build/changelog/unreleased_14/7224.rst +++ /dev/null @@ -1,15 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 7224 - - Fixed bug in "relationship to aliased class" feature introduced at - :ref:`relationship_aliased_class` where it was not possible to create a - loader strategy option targeting an attribute on the target using the - :func:`_orm.aliased` construct directly in a second loader option, such as - ``selectinload(A.aliased_bs).joinedload(aliased_b.cs)``, without explicitly - qualifying using :meth:`_orm.PropComparator.of_type` on the preceding - element of the path. Additionally, targeting the non-aliased class directly - would be accepted (inappropriately), but would silently fail, such as - ``selectinload(A.aliased_bs).joinedload(B.cs)``; this now raises an error - referring to the typing mismatch. - diff --git a/doc/build/changelog/unreleased_14/7239.rst b/doc/build/changelog/unreleased_14/7239.rst deleted file mode 100644 index 14ef19118fd..00000000000 --- a/doc/build/changelog/unreleased_14/7239.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: bug, orm, regression - :tickets: 7239 - - Fixed 1.4 regression where :meth:`_orm.Query.filter_by` would not function - correctly on a :class:`_orm.Query` that was produced from - :meth:`_orm.Query.union`, :meth:`_orm.Query.from_self` or similar. diff --git a/doc/build/changelog/unreleased_14/7244.rst b/doc/build/changelog/unreleased_14/7244.rst deleted file mode 100644 index 92352c6001b..00000000000 --- a/doc/build/changelog/unreleased_14/7244.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. 
change:: - :tags: bug, orm, regression - :tickets: 7244 - - Fixed 1.4 regression where :meth:`_orm.Query.filter_by` would not function - correctly when :meth:`_orm.Query.join` were joined to an entity which made - use of :meth:`_orm.PropComparator.of_type` to specify an aliased version of - the target entity. The issue also applies to future style ORM queries - constructed with :func:`_sql.select`. - diff --git a/doc/build/changelog/unreleased_14/7269.rst b/doc/build/changelog/unreleased_14/7269.rst deleted file mode 100644 index 6bbd126052d..00000000000 --- a/doc/build/changelog/unreleased_14/7269.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 7269 - - Fixed issue where using the feature of using a string label for ordering or - grouping described at :ref:`tutorial_order_by_label` would fail to function - correctly if used on a :class:`.CTE` construct, when the CTE were embedded - inside of an enclosing :class:`_sql.Select` statement that itself was set - up as a scalar subquery. - - diff --git a/doc/build/changelog/unreleased_14/7272.rst b/doc/build/changelog/unreleased_14/7272.rst deleted file mode 100644 index a38aacdaa8e..00000000000 --- a/doc/build/changelog/unreleased_14/7272.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. change:: - :tags: bug, engine - :tickets: 7272 - :versions: 2.0.0b1 - - Fixed issue in future :class:`_future.Engine` where calling upon - :meth:`_future.Engine.begin` and entering the context manager would not - close the connection if the actual BEGIN operation failed for some reason, - such as an event handler raising an exception; this use case failed to be - tested for the future version of the engine. Note that the "future" context - managers which handle ``begin()`` blocks in Core and ORM don't actually run - the "BEGIN" operation until the context managers are actually entered. This - is different from the legacy version which runs the "BEGIN" operation up - front. 
diff --git a/doc/build/changelog/unreleased_14/7274.rst b/doc/build/changelog/unreleased_14/7274.rst deleted file mode 100644 index 08e961a7290..00000000000 --- a/doc/build/changelog/unreleased_14/7274.rst +++ /dev/null @@ -1,37 +0,0 @@ -.. change:: - :tags: bug, engine, regression - :tickets: 7274 - :versions: 2.0.0b1 - - Fixed regression where the :meth:`_engine.CursorResult.fetchmany` method - would fail to autoclose a server-side cursor (i.e. when ``stream_results`` - or ``yield_per`` is in use, either Core or ORM oriented results) when the - results were fully exhausted. - -.. change:: - :tags: bug, orm - :tickets: 7274 - :versions: 2.0.0b1 - - All :class:`_result.Result` objects will now consistently raise - :class:`_exc.ResourceClosedError` if they are used after a hard close, - which includes the "hard close" that occurs after calling "single row or - value" methods like :meth:`_result.Result.first` and - :meth:`_result.Result.scalar`. This was already the behavior of the most - common class of result objects returned for Core statement executions, i.e. - those based on :class:`_engine.CursorResult`, so this behavior is not new. - However, the change has been extended to properly accommodate for the ORM - "filtering" result objects returned when using 2.0 style ORM queries, - which would previously behave in "soft closed" style of returning empty - results, or wouldn't actually "soft close" at all and would continue - yielding from the underlying cursor. - - As part of this change, also added :meth:`_result.Result.close` to the base - :class:`_result.Result` class and implemented it for the filtered result - implementations that are used by the ORM, so that it is possible to call - the :meth:`_engine.CursorResult.close` method on the underlying - :class:`_engine.CursorResult` when the the ``yield_per`` execution option - is in use to close a server side cursor before remaining ORM results have - been fetched. 
This was again already available for Core result sets but the - change makes it available for 2.0 style ORM results as well. - diff --git a/doc/build/changelog/unreleased_14/7281.rst b/doc/build/changelog/unreleased_14/7281.rst deleted file mode 100644 index a5ca9a1622c..00000000000 --- a/doc/build/changelog/unreleased_14/7281.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, mysql - :tickets: 7281 - :versions: 2.0.0b1 - - Fixed issue in MySQL :meth:`_mysql.Insert.on_duplicate_key_update` which - would render the wrong column name when an expression were used in a VALUES - expression. Pull request courtesy Cristian Sabaila. diff --git a/doc/build/changelog/unreleased_14/7283.rst b/doc/build/changelog/unreleased_14/7283.rst deleted file mode 100644 index 4fc86b4ca97..00000000000 --- a/doc/build/changelog/unreleased_14/7283.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, postgresql, asyncpg - :tickets: 7283 - - Changed the asyncpg dialect to bind the :class:`.Float` type to the "float" - PostgreSQL type instead of "numeric" so that the value ``float(inf)`` can - be accommodated. Added test suite support for persistence of the "inf" - value. - diff --git a/doc/build/changelog/unreleased_14/7284.rst b/doc/build/changelog/unreleased_14/7284.rst deleted file mode 100644 index b5d23739c8f..00000000000 --- a/doc/build/changelog/unreleased_14/7284.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. change:: - :tags: postgresql, usecase, asyncpg - :tickets: 7284 - :versions: 2.0.0b1 - - Added overridable methods ``PGDialect_asyncpg.setup_asyncpg_json_codec`` - and ``PGDialect_asyncpg.setup_asyncpg_jsonb_codec`` codec, which handle the - required task of registering JSON/JSONB codecs for these datatypes when - using asyncpg. The change is that methods are broken out as individual, - overridable methods to support third party dialects that need to alter or - disable how these particular codecs are set up. 
- - diff --git a/doc/build/changelog/unreleased_14/7287.rst b/doc/build/changelog/unreleased_14/7287.rst deleted file mode 100644 index 14c72a8aff1..00000000000 --- a/doc/build/changelog/unreleased_14/7287.rst +++ /dev/null @@ -1,12 +0,0 @@ -.. change:: - :tags: bug, sql, regression - :tickets: 7287 - - Fixed regression where the :func:`_sql.text` construct would no longer be - accepted as a target case in the "whens" list within a :func:`_sql.case` - construct. The regression appears related to an attempt to guard against - some forms of literal values that were considered to be ambiguous when - passed here; however, there's no reason the target cases shouldn't be - interpreted as open-ended SQL expressions just like anywhere else, and a - literal string or tuple will be converted to a bound parameter as would be - the case elsewhere. \ No newline at end of file diff --git a/doc/build/changelog/unreleased_14/7291.rst b/doc/build/changelog/unreleased_14/7291.rst deleted file mode 100644 index add383ee861..00000000000 --- a/doc/build/changelog/unreleased_14/7291.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, engine - :tickets: 7291 - - Fixed issue in future :class:`_future.Connection` object where the - :meth:`_future.Connection.execute` method would not accept a non-dict - mapping object, such as SQLAlchemy's own :class:`.RowMapping` or other - ``abc.collections.Mapping`` object as a parameter dictionary. diff --git a/doc/build/changelog/unreleased_14/7292.rst b/doc/build/changelog/unreleased_14/7292.rst deleted file mode 100644 index e75d11e813f..00000000000 --- a/doc/build/changelog/unreleased_14/7292.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. 
change:: - :tags: bug, sql, regression - :tickets: 7292 - - Fixed regression where the row objects returned for ORM queries, which are - now the normal :class:`_sql.Row` objects, would not be interpreted by the - :meth:`_sql.ColumnOperators.in_` operator as tuple values to be broken out - into individual bound parameters, and would instead pass them as single - values to the driver leading to failures. The change to the "expanding IN" - system now accommodates for the expression already being of type - :class:`.TupleType` and treats values accordingly if so. In the uncommon - case of using "tuple-in" with an untyped statement such as a textual - statement with no typing information, a tuple value is detected for values - that implement ``collections.abc.Sequence``, but that are not ``str`` or - ``bytes``, as always when testing for ``Sequence``. - -.. change:: - :tags: usecase, sql - - Added :class:`.TupleType` to the top level ``sqlalchemy`` import namespace. \ No newline at end of file diff --git a/doc/build/changelog/unreleased_14/7295.rst b/doc/build/changelog/unreleased_14/7295.rst deleted file mode 100644 index 058c9d16a58..00000000000 --- a/doc/build/changelog/unreleased_14/7295.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, schema - :tickets: 7295 - - Fixed issue in :class:`.Table` where the - :paramref:`.Table.implicit_returning` parameter would not be - accommodated correctly when passed along with - :paramref:`.Table.extend_existing` to augment an existing - :class:`.Table`. diff --git a/doc/build/changelog/unreleased_14/7300.rst b/doc/build/changelog/unreleased_14/7300.rst deleted file mode 100644 index d9061af0941..00000000000 --- a/doc/build/changelog/unreleased_14/7300.rst +++ /dev/null @@ -1,17 +0,0 @@ -.. 
change:: - :tags: mssql, bug - :tickets: 7300 - - Adjusted the compiler's generation of "post compile" symbols including - those used for "expanding IN" as well as for the "schema translate map" to - not be based directly on plain bracketed strings with underscores, as this - conflicts directly with SQL Server's quoting format of also using brackets, - which produces false matches when the compiler replaces "post compile" and - "schema translate" symbols. The issue created easy to reproduce examples - both with the :meth:`.Inspector.get_schema_names` method when used in - conjunction with the - :paramref:`_engine.Connection.execution_options.schema_translate_map` - feature, as well in the unlikely case that a symbol overlapping with the - internal name "POSTCOMPILE" would be used with a feature like "expanding - in". - diff --git a/doc/build/changelog/unreleased_14/7304.rst b/doc/build/changelog/unreleased_14/7304.rst deleted file mode 100644 index 44d188a30ee..00000000000 --- a/doc/build/changelog/unreleased_14/7304.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 7304 - - Fixed issue where deferred polymorphic loading of attributes from a - joined-table inheritance subclass would fail to populate the attribute - correctly if the :func:`_orm.load_only` option were used to originally - exclude that attribute, in the case where the load_only were descending - from a relationship loader option. The fix allows that other valid options - such as ``defer(..., raiseload=True)`` etc. still function as expected. diff --git a/doc/build/conf.py b/doc/build/conf.py index 169d695d0f5..d4b6bea0368 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -203,9 +203,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. 
-release = "1.4.26" +release = "1.4.27" -release_date = "October 19, 2021" +release_date = "November 11, 2021" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From 80b38b45c75f347af70d1be95c27704bcfc6b6bd Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 11 Nov 2021 10:48:50 -0500 Subject: [PATCH 024/632] Version 1.4.28 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 3d7d4c37c62..758d925aa3f 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.28 + :include_notes_from: unreleased_14 + .. changelog:: :version: 1.4.27 :released: November 11, 2021 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index d5cc233243b..e962fc3b8fe 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.27" +__version__ = "1.4.28" def __go(lcls): From 24a53fd8fce2cdfb0154caa687ef893bcff120a7 Mon Sep 17 00:00:00 2001 From: Eric Masseran Date: Tue, 2 Nov 2021 16:40:04 -0400 Subject: [PATCH 025/632] Add Non linear CTE support "Compound select" methods like :meth:`_sql.Select.union`, :meth:`_sql.Select.intersect_all` etc. now accept ``*other`` as an argument rather than ``other`` to allow for multiple additional SELECTs to be compounded with the parent statement at once. 
In particular, the change as applied to :meth:`_sql.CTE.union` and :meth:`_sql.CTE.union_all` now allow for a so-called "non-linear CTE" to be created with the :class:`_sql.CTE` construct, whereas previously there was no way to have more than two CTE sub-elements in a UNION together while still correctly calling upon the CTE in recursive fashion. Pull request courtesy Eric Masseran. Allow: ```sql WITH RECURSIVE nodes(x) AS ( SELECT 59 UNION SELECT aa FROM edge JOIN nodes ON bb=x UNION SELECT bb FROM edge JOIN nodes ON aa=x ) SELECT x FROM nodes; ``` Based on @zzzeek suggestion: https://github.com/sqlalchemy/sqlalchemy/pull/7133#issuecomment-933882348 Fixes: #7259 Closes: #7260 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/7260 Pull-request-sha: 2565a5fd4b1940e92125e53aeaa731cc682f49bb Change-Id: I685c8379762b5fb6ab4107ff8f4d8a4de70c0ca6 (cherry picked from commit 958f902b1fc528fed0be550bc573545de47ed854) --- doc/build/changelog/unreleased_14/7259.rst | 13 ++ lib/sqlalchemy/sql/selectable.py | 184 +++++++++++++++++---- test/sql/test_cte.py | 47 ++++++ test/sql/test_select.py | 23 ++- 4 files changed, 237 insertions(+), 30 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7259.rst diff --git a/doc/build/changelog/unreleased_14/7259.rst b/doc/build/changelog/unreleased_14/7259.rst new file mode 100644 index 00000000000..477714edd9c --- /dev/null +++ b/doc/build/changelog/unreleased_14/7259.rst @@ -0,0 +1,13 @@ +.. change:: + :tags: sql, usecase + :tickets: 7259 + + "Compound select" methods like :meth:`_sql.Select.union`, + :meth:`_sql.Select.intersect_all` etc. now accept ``*other`` as an argument + rather than ``other`` to allow for multiple additional SELECTs to be + compounded with the parent statement at once. 
In particular, the change as + applied to :meth:`_sql.CTE.union` and :meth:`_sql.CTE.union_all` now allow + for a so-called "non-linear CTE" to be created with the :class:`_sql.CTE` + construct, whereas previously there was no way to have more than two CTE + sub-elements in a UNION together while still correctly calling upon the CTE + in recursive fashion. Pull request courtesy Eric Masseran. diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index 91436029706..95fca267c65 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -2121,9 +2121,23 @@ def alias(self, name=None, flat=False): _suffixes=self._suffixes, ) - def union(self, other): + def union(self, *other): + r"""Return a new :class:`_expression.CTE` with a SQL ``UNION`` + of the original CTE against the given selectables provided + as positional arguments. + + :param \*other: one or more elements with which to create a + UNION. + + .. versionchanged:: 1.4.28 multiple elements are now accepted. + + .. seealso:: + + :meth:`_sql.HasCTE.cte` - examples of calling styles + + """ return CTE._construct( - self.element.union(other), + self.element.union(*other), name=self.name, recursive=self.recursive, nesting=self.nesting, @@ -2132,9 +2146,23 @@ def union(self, other): _suffixes=self._suffixes, ) - def union_all(self, other): + def union_all(self, *other): + r"""Return a new :class:`_expression.CTE` with a SQL ``UNION ALL`` + of the original CTE against the given selectables provided + as positional arguments. + + :param \*other: one or more elements with which to create a + UNION. + + .. versionchanged:: 1.4.28 multiple elements are now accepted. + + .. 
seealso:: + + :meth:`_sql.HasCTE.cte` - examples of calling styles + + """ return CTE._construct( - self.element.union_all(other), + self.element.union_all(*other), name=self.name, recursive=self.recursive, nesting=self.nesting, @@ -2396,7 +2424,7 @@ def cte(self, name=None, recursive=False, nesting=False): connection.execute(upsert) - Example 4, Nesting CTE:: + Example 4, Nesting CTE (SQLAlchemy 1.4.24 and above):: value_a = select( literal("root").label("n") @@ -2426,6 +2454,44 @@ def cte(self, name=None, recursive=False, nesting=False): SELECT value_a.n AS a, value_b.n AS b FROM value_a, value_b + Example 5, Non-Linear CTE (SQLAlchemy 1.4.28 and above):: + + edge = Table( + "edge", + metadata, + Column("id", Integer, primary_key=True), + Column("left", Integer), + Column("right", Integer), + ) + + root_node = select(literal(1).label("node")).cte( + "nodes", recursive=True + ) + + left_edge = select(edge.c.left).join( + root_node, edge.c.right == root_node.c.node + ) + right_edge = select(edge.c.right).join( + root_node, edge.c.left == root_node.c.node + ) + + subgraph_cte = root_node.union(left_edge, right_edge) + + subgraph = select(subgraph_cte) + + The above query will render 2 UNIONs inside the recursive CTE:: + + WITH RECURSIVE nodes(node) AS ( + SELECT 1 AS node + UNION + SELECT edge."left" AS "left" + FROM edge JOIN nodes ON edge."right" = nodes.node + UNION + SELECT edge."right" AS "right" + FROM edge JOIN nodes ON edge."left" = nodes.node + ) + SELECT nodes.node FROM nodes + .. seealso:: :meth:`_orm.Query.cte` - ORM version of @@ -6270,47 +6336,107 @@ def self_group(self, against=None): else: return SelectStatementGrouping(self) - def union(self, other, **kwargs): - """Return a SQL ``UNION`` of this select() construct against - the given selectable. + def union(self, *other, **kwargs): + r"""Return a SQL ``UNION`` of this select() construct against + the given selectables provided as positional arguments. 
+ + :param \*other: one or more elements with which to create a + UNION. + + .. versionchanged:: 1.4.28 + + multiple elements are now accepted. + + :param \**kwargs: keyword arguments are forwarded to the constructor + for the newly created :class:`_sql.CompoundSelect` object. """ - return CompoundSelect._create_union(self, other, **kwargs) + return CompoundSelect._create_union(self, *other, **kwargs) + + def union_all(self, *other, **kwargs): + r"""Return a SQL ``UNION ALL`` of this select() construct against + the given selectables provided as positional arguments. + + :param \*other: one or more elements with which to create a + UNION. - def union_all(self, other, **kwargs): - """Return a SQL ``UNION ALL`` of this select() construct against - the given selectable. + .. versionchanged:: 1.4.28 + + multiple elements are now accepted. + + :param \**kwargs: keyword arguments are forwarded to the constructor + for the newly created :class:`_sql.CompoundSelect` object. """ - return CompoundSelect._create_union_all(self, other, **kwargs) + return CompoundSelect._create_union_all(self, *other, **kwargs) + + def except_(self, *other, **kwargs): + r"""Return a SQL ``EXCEPT`` of this select() construct against + the given selectable provided as positional arguments. + + :param \*other: one or more elements with which to create a + UNION. + + .. versionchanged:: 1.4.28 + + multiple elements are now accepted. - def except_(self, other, **kwargs): - """Return a SQL ``EXCEPT`` of this select() construct against - the given selectable. + :param \**kwargs: keyword arguments are forwarded to the constructor + for the newly created :class:`_sql.CompoundSelect` object. """ - return CompoundSelect._create_except(self, other, **kwargs) + return CompoundSelect._create_except(self, *other, **kwargs) - def except_all(self, other, **kwargs): - """Return a SQL ``EXCEPT ALL`` of this select() construct against - the given selectable. 
+ def except_all(self, *other, **kwargs): + r"""Return a SQL ``EXCEPT ALL`` of this select() construct against + the given selectables provided as positional arguments. + + :param \*other: one or more elements with which to create a + UNION. + + .. versionchanged:: 1.4.28 + + multiple elements are now accepted. + + :param \**kwargs: keyword arguments are forwarded to the constructor + for the newly created :class:`_sql.CompoundSelect` object. """ - return CompoundSelect._create_except_all(self, other, **kwargs) + return CompoundSelect._create_except_all(self, *other, **kwargs) + + def intersect(self, *other, **kwargs): + r"""Return a SQL ``INTERSECT`` of this select() construct against + the given selectables provided as positional arguments. + + :param \*other: one or more elements with which to create a + UNION. - def intersect(self, other, **kwargs): - """Return a SQL ``INTERSECT`` of this select() construct against - the given selectable. + .. versionchanged:: 1.4.28 + + multiple elements are now accepted. + + :param \**kwargs: keyword arguments are forwarded to the constructor + for the newly created :class:`_sql.CompoundSelect` object. """ - return CompoundSelect._create_intersect(self, other, **kwargs) + return CompoundSelect._create_intersect(self, *other, **kwargs) + + def intersect_all(self, *other, **kwargs): + r"""Return a SQL ``INTERSECT ALL`` of this select() construct + against the given selectables provided as positional arguments. + + :param \*other: one or more elements with which to create a + UNION. + + .. versionchanged:: 1.4.28 + + multiple elements are now accepted. - def intersect_all(self, other, **kwargs): - """Return a SQL ``INTERSECT ALL`` of this select() construct - against the given selectable. + :param \**kwargs: keyword arguments are forwarded to the constructor + for the newly created :class:`_sql.CompoundSelect` object. 
""" - return CompoundSelect._create_intersect_all(self, other, **kwargs) + return CompoundSelect._create_intersect_all(self, *other, **kwargs) @property @util.deprecated_20( diff --git a/test/sql/test_cte.py b/test/sql/test_cte.py index 10fe81b5530..df9f065acc8 100644 --- a/test/sql/test_cte.py +++ b/test/sql/test_cte.py @@ -1769,6 +1769,53 @@ def test_no_alias_construct(self): "foo", ) + def test_recursive_cte_with_multiple_union(self): + root_query = select(literal(1).label("val")).cte( + "increasing", recursive=True + ) + rec_part_1 = select((root_query.c.val + 3).label("val")).where( + root_query.c.val < 15 + ) + rec_part_2 = select((root_query.c.val + 5).label("val")).where( + root_query.c.val < 15 + ) + union_rec_query = root_query.union(rec_part_1, rec_part_2) + union_stmt = select(union_rec_query) + self.assert_compile( + union_stmt, + "WITH RECURSIVE increasing(val) AS " + "(SELECT :param_1 AS val " + "UNION SELECT increasing.val + :val_1 AS val FROM increasing " + "WHERE increasing.val < :val_2 " + "UNION SELECT increasing.val + :val_3 AS val FROM increasing " + "WHERE increasing.val < :val_4) " + "SELECT increasing.val FROM increasing", + ) + + def test_recursive_cte_with_multiple_union_all(self): + root_query = select(literal(1).label("val")).cte( + "increasing", recursive=True + ) + rec_part_1 = select((root_query.c.val + 3).label("val")).where( + root_query.c.val < 15 + ) + rec_part_2 = select((root_query.c.val + 5).label("val")).where( + root_query.c.val < 15 + ) + + union_all_rec_query = root_query.union_all(rec_part_1, rec_part_2) + union_all_stmt = select(union_all_rec_query) + self.assert_compile( + union_all_stmt, + "WITH RECURSIVE increasing(val) AS " + "(SELECT :param_1 AS val " + "UNION ALL SELECT increasing.val + :val_1 AS val FROM increasing " + "WHERE increasing.val < :val_2 " + "UNION ALL SELECT increasing.val + :val_3 AS val FROM increasing " + "WHERE increasing.val < :val_4) " + "SELECT increasing.val FROM increasing", + ) + class 
NestingCTETest(fixtures.TestBase, AssertsCompiledSQL): diff --git a/test/sql/test_select.py b/test/sql/test_select.py index 17b47d96de7..c9abb7fb8b4 100644 --- a/test/sql/test_select.py +++ b/test/sql/test_select.py @@ -8,15 +8,16 @@ from sqlalchemy import select from sqlalchemy import String from sqlalchemy import Table +from sqlalchemy import testing from sqlalchemy import tuple_ from sqlalchemy import union from sqlalchemy.sql import column +from sqlalchemy.sql import literal from sqlalchemy.sql import table from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import fixtures - table1 = table( "mytable", column("myid", Integer), @@ -412,3 +413,23 @@ def test_select_tuple_subquery(self): "SELECT anon_1.name FROM (SELECT mytable.name AS name, " "(mytable.myid, mytable.name) AS anon_2 FROM mytable) AS anon_1", ) + + @testing.combinations( + ("union_all", "UNION ALL"), + ("union", "UNION"), + ("intersect_all", "INTERSECT ALL"), + ("intersect", "INTERSECT"), + ("except_all", "EXCEPT ALL"), + ("except_", "EXCEPT"), + ) + def test_select_multiple_compound_elements(self, methname, joiner): + stmt = select(literal(1)) + meth = getattr(stmt, methname) + stmt = meth(select(literal(2)), select(literal(3))) + + self.assert_compile( + stmt, + "SELECT :param_1 AS anon_1" + " %(joiner)s SELECT :param_2 AS anon_2" + " %(joiner)s SELECT :param_3 AS anon_3" % {"joiner": joiner}, + ) From 77f987022c0768a553be1f052c735c0bf3428aa1 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 17 Nov 2021 15:08:29 -0500 Subject: [PATCH 026/632] generalize cache_ok to UserDefinedType Extended the ``cache_ok`` flag and corresponding warning message if this flag is not defined, a behavior first established for :class:`.TypeDecorator` as part of :ticket:`6436`, to also take place for :class:`.UserDefinedType`, by generalizing the flag and associated caching logic to a new common base for these two types, 
:class:`.ExternalType`. The change means any current :class:`.UserDefinedType` will now cause SQL statement caching to no longer take place for statements which make use of the datatype, along with a warning being emitted, unless the class defines the :attr:`.UserDefinedType.cache_ok` flag as True. If the datatype cannot form a deterministic, hashable cache key derived from its arguments, it may return False which will continue to keep caching disabled but will suppress the warning. In particular, custom datatypes currently used in packages such as SQLAlchemy-utils will need to implement this flag. The issue was observed as a result of a SQLAlchemy-utils datatype that is not currently cacheable. Fixes: #7319 Change-Id: Ie0b5d4587df87bfe66d2fe7cd4585c3882584575 (cherry picked from commit 4761e6878b127f7d5fb09addaae15426edbb0b73) --- doc/build/changelog/unreleased_14/7319.rst | 24 ++ doc/build/core/custom_types.rst | 2 + doc/build/core/type_api.rst | 2 + lib/sqlalchemy/sql/type_api.py | 249 ++++++++++++++++----- lib/sqlalchemy/types.py | 2 + test/sql/test_compare.py | 18 +- test/sql/test_types.py | 10 + 7 files changed, 242 insertions(+), 65 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7319.rst diff --git a/doc/build/changelog/unreleased_14/7319.rst b/doc/build/changelog/unreleased_14/7319.rst new file mode 100644 index 00000000000..0c2b19d3148 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7319.rst @@ -0,0 +1,24 @@ +.. change:: + :tags: bug, types, regression + :tickets: 7319 + + Extended the :attr:`.TypeDecorator.cache_ok` attribute and corresponding + warning message if this flag is not defined, a behavior first established + for :class:`.TypeDecorator` as part of :ticket:`6436`, to also take place + for :class:`.UserDefinedType`, by generalizing the flag and associated + caching logic to a new common base for these two types, + :class:`.ExternalType` to create :attr:`.UserDefinedType.cache_ok`. 
+ + The change means any current :class:`.UserDefinedType` will now cause SQL + statement caching to no longer take place for statements which make use of + the datatype, along with a warning being emitted, unless the class defines + the :attr:`.UserDefinedType.cache_ok` flag as True. If the datatype cannot + form a deterministic, hashable cache key derived from its arguments, + the attribute may be set to False which will continue to keep caching disabled but will suppress the + warning. In particular, custom datatypes currently used in packages such as + SQLAlchemy-utils will need to implement this flag. The issue was observed + as a result of a SQLAlchemy-utils datatype that is not currently cacheable. + + .. seealso:: + + :attr:`.ExternalType.cache_ok` diff --git a/doc/build/core/custom_types.rst b/doc/build/core/custom_types.rst index 6ec31ce089e..1b6a91be186 100644 --- a/doc/build/core/custom_types.rst +++ b/doc/build/core/custom_types.rst @@ -67,6 +67,7 @@ to and/or from the database is required. .. autoclass:: TypeDecorator :members: + .. autoattribute:: cache_ok TypeDecorator Recipes --------------------- @@ -594,6 +595,7 @@ is needed, use :class:`.TypeDecorator` instead. .. autoclass:: UserDefinedType :members: + .. autoattribute:: cache_ok .. _custom_and_decorated_types_reflection: diff --git a/doc/build/core/type_api.rst b/doc/build/core/type_api.rst index 0dd1b492053..2586b2b732a 100644 --- a/doc/build/core/type_api.rst +++ b/doc/build/core/type_api.rst @@ -18,6 +18,8 @@ Base Type API .. autoclass:: NullType +.. autoclass:: ExternalType + :members: .. 
autoclass:: Variant + :members: with_variant, __init__ diff --git a/lib/sqlalchemy/sql/type_api.py b/lib/sqlalchemy/sql/type_api.py index 2a4688bcceb..f3fed02d24f 100644 --- a/lib/sqlalchemy/sql/type_api.py +++ b/lib/sqlalchemy/sql/type_api.py @@ -815,7 +815,180 @@ class VisitableCheckKWArg(util.EnsureKWArgType, TraversibleType): pass -class UserDefinedType(util.with_metaclass(VisitableCheckKWArg, TypeEngine)): +class ExternalType(object): + """mixin that defines attributes and behaviors specific to third-party + datatypes. + + "Third party" refers to datatypes that are defined outside the scope + of SQLAlchemy within either end-user application code or within + external extensions to SQLAlchemy. + + Subclasses currently include :class:`.TypeDecorator` and + :class:`.UserDefinedType`. + + .. versionadded:: 1.4.28 + + """ + + cache_ok = None + """Indicate if statements using this :class:`.ExternalType` are "safe to + cache". + + The default value ``None`` will emit a warning and then not allow caching + of a statement which includes this type. Set to ``False`` to disable + statements using this type from being cached at all without a warning. + When set to ``True``, the object's class and selected elements from its + state will be used as part of the cache key. For example, using a + :class:`.TypeDecorator`:: + + class MyType(TypeDecorator): + impl = String + + cache_ok = True + + def __init__(self, choices): + self.choices = tuple(choices) + self.internal_only = True + + The cache key for the above type would be equivalent to:: + + >>> MyType(["a", "b", "c"])._static_cache_key + (<class '__main__.MyType'>, ('choices', ('a', 'b', 'c'))) + + The caching scheme will extract attributes from the type that correspond + to the names of parameters in the ``__init__()`` method. Above, the + "choices" attribute becomes part of the cache key but "internal_only" + does not, because there is no parameter named "internal_only". 
+ + The requirements for cacheable elements is that they are hashable + and also that they indicate the same SQL rendered for expressions using + this type every time for a given cache value. + + To accommodate for datatypes that refer to unhashable structures such + as dictionaries, sets and lists, these objects can be made "cacheable" + by assigning hashable structures to the attributes whose names + correspond with the names of the arguments. For example, a datatype + which accepts a dictionary of lookup values may publish this as a sorted + series of tuples. Given a previously un-cacheable type as:: + + class LookupType(UserDefinedType): + '''a custom type that accepts a dictionary as a parameter. + + this is the non-cacheable version, as "self.lookup" is not + hashable. + + ''' + + def __init__(self, lookup): + self.lookup = lookup + + def get_col_spec(self, **kw): + return "VARCHAR(255)" + + def bind_processor(self, dialect): + # ... works with "self.lookup" ... + + Where "lookup" is a dictionary. The type will not be able to generate + a cache key:: + + >>> type_ = LookupType({"a": 10, "b": 20}) + >>> type_._static_cache_key + <stdin>:1: SAWarning: UserDefinedType LookupType({'a': 10, 'b': 20}) will not + produce a cache key because the ``cache_ok`` flag is not set to True. + Set this flag to True if this type object's state is safe to use + in a cache key, or False to disable this warning. + symbol('no_cache') + + If we **did** set up such a cache key, it wouldn't be usable.
 We would + get a tuple structure that contains a dictionary inside of it, which + cannot itself be used as a key in a "cache dictionary" such as SQLAlchemy's + statement cache, since Python dictionaries aren't hashable:: + + >>> # set cache_ok = True + >>> type_.cache_ok = True + + >>> # this is the cache key it would generate + >>> key = type_._static_cache_key + >>> key + (<class '__main__.LookupType'>, ('lookup', {'a': 10, 'b': 20})) + + >>> # however this key is not hashable, will fail when used with + >>> # SQLAlchemy statement cache + >>> some_cache = {key: "some sql value"} + Traceback (most recent call last): File "<stdin>", line 1, + in <module> TypeError: unhashable type: 'dict' + + The type may be made cacheable by assigning a sorted tuple of tuples + to the ".lookup" attribute:: + + class LookupType(UserDefinedType): + '''a custom type that accepts a dictionary as a parameter. + + The dictionary is stored both as itself in a private variable, + and published in a public variable as a sorted tuple of tuples, + which is hashable and will also return the same value for any + two equivalent dictionaries. Note it assumes the keys and + values of the dictionary are themselves hashable. + + ''' + + cache_ok = True + + def __init__(self, lookup): + self._lookup = lookup + + # assume keys/values of "lookup" are hashable; otherwise + # they would also need to be converted in some way here + self.lookup = tuple( + (key, lookup[key]) for key in sorted(lookup) + ) + + def get_col_spec(self, **kw): + return "VARCHAR(255)" + + def bind_processor(self, dialect): + # ... works with "self._lookup" ... + + Where above, the cache key for ``LookupType({"a": 10, "b": 20})`` will be:: + + >>> LookupType({"a": 10, "b": 20})._static_cache_key + (<class '__main__.LookupType'>, ('lookup', (('a', 10), ('b', 20)))) + + .. versionadded:: 1.4.14 - added the ``cache_ok`` flag to allow + some configurability of caching for :class:`.TypeDecorator` classes. + + .. 
versionadded:: 1.4.28 - added the :class:`.ExternalType` mixin which + generalizes the ``cache_ok`` flag to both the :class:`.TypeDecorator` + and :class:`.UserDefinedType` classes. + + .. seealso:: + + :ref:`sql_caching` + + """ # noqa: E501 + + @property + def _static_cache_key(self): + if self.cache_ok is None: + subtype_idx = self.__class__.__mro__.index(ExternalType) + subtype = self.__class__.__mro__[max(subtype_idx - 1, 0)] + + util.warn( + "%s %r will not produce a cache key because " + "the ``cache_ok`` flag is not set to True. " + "Set this flag to True if this type object's " + "state is safe to use in a cache key, or False to " + "disable this warning." % (subtype.__name__, self) + ) + elif self.cache_ok is True: + return super(ExternalType, self)._static_cache_key + + return NO_CACHE + + +class UserDefinedType( + util.with_metaclass(VisitableCheckKWArg, ExternalType, TypeEngine) +): """Base for user defined types. This should be the base of new types. Note that @@ -825,6 +998,8 @@ class UserDefinedType(util.with_metaclass(VisitableCheckKWArg, TypeEngine)): import sqlalchemy.types as types class MyType(types.UserDefinedType): + cache_ok = True + def __init__(self, precision = 8): self.precision = precision @@ -860,6 +1035,20 @@ def process(value): the ``get_col_spec()`` method via the keyword argument ``type_expression``, if it receives ``**kw`` in its signature. + The :attr:`.UserDefinedType.cache_ok` class-level flag indicates if this + custom :class:`.UserDefinedType` is safe to be used as part of a cache key. + This flag defaults to ``None`` which will initially generate a warning + when the SQL compiler attempts to generate a cache key for a statement + that uses this type. If the :class:`.UserDefinedType` is not guaranteed + to produce the same bind/result behavior and SQL generation + every time, this flag should be set to ``False``; otherwise if the + class produces the same behavior each time, it may be set to ``True``. 
+ See :attr:`.UserDefinedType.cache_ok` for further notes on how this works. + + .. versionadded:: 1.4.28 Generalized the :attr:`.ExternalType.cache_ok` + flag so that it is available for both :class:`.TypeDecorator` as well + as :class:`.UserDefinedType`. + """ __visit_name__ = "user_defined" @@ -957,7 +1146,7 @@ def adapt_emulated_to_native(cls, impl, **kw): return cls(**kw) -class TypeDecorator(SchemaEventTarget, TypeEngine): +class TypeDecorator(ExternalType, SchemaEventTarget, TypeEngine): """Allows the creation of types which add additional functionality to an existing type. @@ -1120,47 +1309,6 @@ def __init__(self, *args, **kwargs): """ - cache_ok = None - """Indicate if statements using this :class:`.TypeDecorator` are "safe to - cache". - - The default value ``None`` will emit a warning and then not allow caching - of a statement which includes this type. Set to ``False`` to disable - statements using this type from being cached at all without a warning. - When set to ``True``, the object's class and selected elements from its - state will be used as part of the cache key, e.g.:: - - class MyType(TypeDecorator): - impl = String - - cache_ok = True - - def __init__(self, choices): - self.choices = tuple(choices) - self.internal_only = True - - The cache key for the above type would be equivalent to:: - - (, ('choices', ('a', 'b', 'c'))) - - The caching scheme will extract attributes from the type that correspond - to the names of parameters in the ``__init__()`` method. Above, the - "choices" attribute becomes part of the cache key but "internal_only" - does not, because there is no parameter named "internal_only". - - The requirements for cacheable elements is that they are hashable - and also that they indicate the same SQL rendered for expressions using - this type every time for a given cache value. - - .. versionadded:: 1.4.14 - added the ``cache_ok`` flag to allow - some configurability of caching for :class:`.TypeDecorator` classes. - - .. 
seealso:: - - :ref:`sql_caching` - - """ - class Comparator(TypeEngine.Comparator): """A :class:`.TypeEngine.Comparator` that is specific to :class:`.TypeDecorator`. @@ -1196,21 +1344,6 @@ def comparator_factory(self): {}, ) - @property - def _static_cache_key(self): - if self.cache_ok is None: - util.warn( - "TypeDecorator %r will not produce a cache key because " - "the ``cache_ok`` flag is not set to True. " - "Set this flag to True if this type object's " - "state is safe to use in a cache key, or False to " - "disable this warning." % self - ) - elif self.cache_ok is True: - return super(TypeDecorator, self)._static_cache_key - - return NO_CACHE - def _gen_dialect_impl(self, dialect): """ #todo diff --git a/lib/sqlalchemy/types.py b/lib/sqlalchemy/types.py index df8abdc6944..9e695f6782b 100644 --- a/lib/sqlalchemy/types.py +++ b/lib/sqlalchemy/types.py @@ -13,6 +13,7 @@ "TypeEngine", "TypeDecorator", "UserDefinedType", + "ExternalType", "INT", "CHAR", "VARCHAR", @@ -110,6 +111,7 @@ from .sql.sqltypes import VARBINARY from .sql.sqltypes import VARCHAR from .sql.type_api import adapt_type +from .sql.type_api import ExternalType from .sql.type_api import to_instance from .sql.type_api import TypeDecorator from .sql.type_api import TypeEngine diff --git a/test/sql/test_compare.py b/test/sql/test_compare.py index 2db7a574464..fe3512375ac 100644 --- a/test/sql/test_compare.py +++ b/test/sql/test_compare.py @@ -67,6 +67,7 @@ from sqlalchemy.sql.selectable import Select from sqlalchemy.sql.selectable import Selectable from sqlalchemy.sql.selectable import SelectStatementGrouping +from sqlalchemy.sql.type_api import UserDefinedType from sqlalchemy.sql.visitors import InternalTraversal from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures @@ -1744,19 +1745,21 @@ def test_is_select(self, case): class TypesTest(fixtures.TestBase): - def test_typedec_no_cache(self): - class MyType(TypeDecorator): + @testing.combinations(TypeDecorator, UserDefinedType) 
+ def test_thirdparty_no_cache(self, base): + class MyType(base): impl = String expr = column("q", MyType()) == 1 with expect_warnings( - r"TypeDecorator MyType\(\) will not produce a cache key" + r"%s MyType\(\) will not produce a cache key" % base.__name__ ): is_(expr._generate_cache_key(), None) - def test_typedec_cache_false(self): - class MyType(TypeDecorator): + @testing.combinations(TypeDecorator, UserDefinedType) + def test_thirdparty_cache_false(self, base): + class MyType(base): impl = String cache_ok = False @@ -1765,8 +1768,9 @@ class MyType(TypeDecorator): is_(expr._generate_cache_key(), None) - def test_typedec_cache_ok(self): - class MyType(TypeDecorator): + @testing.combinations(TypeDecorator, UserDefinedType) + def test_thirdparty_cache_ok(self, base): + class MyType(base): impl = String cache_ok = True diff --git a/test/sql/test_types.py b/test/sql/test_types.py index 01266d15b88..ffa6f922ed2 100644 --- a/test/sql/test_types.py +++ b/test/sql/test_types.py @@ -1464,6 +1464,8 @@ class Foo(TypeDecorator): def test_type_decorator_compile_variant_two(self): class UTypeOne(types.UserDefinedType): + cache_ok = True + def get_col_spec(self): return "UTYPEONE" @@ -1474,6 +1476,8 @@ def process(value): return process class UTypeTwo(types.UserDefinedType): + cache_ok = True + def get_col_spec(self): return "UTYPETWO" @@ -1522,6 +1526,8 @@ class Foo(TypeDecorator): class VariantTest(fixtures.TestBase, AssertsCompiledSQL): def setup_test(self): class UTypeOne(types.UserDefinedType): + cache_ok = True + def get_col_spec(self): return "UTYPEONE" @@ -1532,6 +1538,8 @@ def process(value): return process class UTypeTwo(types.UserDefinedType): + cache_ok = True + def get_col_spec(self): return "UTYPETWO" @@ -2961,6 +2969,8 @@ def define_tables(cls, metadata): global MyCustomType, MyTypeDec class MyCustomType(types.UserDefinedType): + cache_ok = True + def get_col_spec(self): return "INT" From 3b6f7ffc878c9ddc2da8da37d9484008cc2eb24f Mon Sep 17 00:00:00 2001 From: 
jonathan vanasco Date: Mon, 27 Sep 2021 12:41:24 -0400 Subject: [PATCH 027/632] Add new sections regarding schemas and reflection * add a new section to reflection.rst `Schemas and Reflection`. * this contains some text from the ticket * migrate some text from `Specifying the Schema Name` to new section * migrate some text from PostgreSQL dialect to new section * target text is made more generic * cross-reference the postgres and new sections to one another, to avoid duplication of docs * update some docs 'meta' to 'metadata_obj' Fixes: #4387 Co-authored-by: Mike Bayer Change-Id: I2b08672753fb2575d30ada07ead77587468fdade (cherry picked from commit 0fa0beacb465c61e792c97d530a0e8fdd7139256) --- doc/build/changelog/migration_12.rst | 2 +- doc/build/core/metadata.rst | 45 ++++- doc/build/core/reflection.rst | 223 ++++++++++++++++++++- doc/build/core/type_basics.rst | 2 +- lib/sqlalchemy/dialects/postgresql/base.py | 50 ++--- lib/sqlalchemy/sql/schema.py | 4 +- lib/sqlalchemy/sql/type_api.py | 2 +- 7 files changed, 285 insertions(+), 43 deletions(-) diff --git a/doc/build/changelog/migration_12.rst b/doc/build/changelog/migration_12.rst index f0b88c49361..bc1d0739e9d 100644 --- a/doc/build/changelog/migration_12.rst +++ b/doc/build/changelog/migration_12.rst @@ -1048,7 +1048,7 @@ localized to the current VALUES clause being processed:: def mydefault(context): return context.get_current_parameters()['counter'] + 12 - mytable = Table('mytable', meta, + mytable = Table('mytable', metadata_obj, Column('counter', Integer), Column('counter_plus_twelve', Integer, default=mydefault, onupdate=mydefault) diff --git a/doc/build/core/metadata.rst b/doc/build/core/metadata.rst index 86a8f6de345..c7316d1b650 100644 --- a/doc/build/core/metadata.rst +++ b/doc/build/core/metadata.rst @@ -284,11 +284,11 @@ remote servers (Oracle DBLINK with synonyms). 
What all of the above approaches have (mostly) in common is that there's a way of referring to this alternate set of tables using a string name. SQLAlchemy -refers to this name as the **schema name**. Within SQLAlchemy, this is nothing more than -a string name which is associated with a :class:`_schema.Table` object, and -is then rendered into SQL statements in a manner appropriate to the target -database such that the table is referred towards in its remote "schema", whatever -mechanism that is on the target database. +refers to this name as the **schema name**. Within SQLAlchemy, this is nothing +more than a string name which is associated with a :class:`_schema.Table` +object, and is then rendered into SQL statements in a manner appropriate to the +target database such that the table is referred towards in its remote "schema", +whatever mechanism that is on the target database. The "schema" name may be associated directly with a :class:`_schema.Table` using the :paramref:`_schema.Table.schema` argument; when using the ORM @@ -298,11 +298,27 @@ the parameter is passed using the ``__table_args__`` parameter dictionary. The "schema" name may also be associated with the :class:`_schema.MetaData` object where it will take effect automatically for all :class:`_schema.Table` objects associated with that :class:`_schema.MetaData` that don't otherwise -specify their own name. Finally, SQLAlchemy also supports a "dynamic" schema name +specify their own name. Finally, SQLAlchemy also supports a "dynamic" schema name system that is often used for multi-tenant applications such that a single set of :class:`_schema.Table` metadata may refer to a dynamically configured set of schema names on a per-connection or per-statement basis. +.. topic:: What's "schema" ? + + SQLAlchemy's support for database "schema" was designed with first party + support for PostgreSQL-style schemas. In this style, there is first a + "database" that typically has a single "owner". 
Within this database there + can be any number of "schemas" which then contain the actual table objects. + + A table within a specific schema is referred towards explicitly using the + syntax ".". Constrast this to an architecture such + as that of MySQL, where there are only "databases", however SQL statements + can refer to multiple databases at once, using the same syntax except it + is ".". On Oracle, this syntax refers to yet another + concept, the "owner" of a table. Regardless of which kind of database is + in use, SQLAlchemy uses the phrase "schema" to refer to the qualifying + identifier within the general syntax of ".". + .. seealso:: :ref:`orm_declarative_table_schema_name` - schema name specification when using the ORM @@ -368,6 +384,8 @@ at once, such as:: :ref:`multipart_schema_names` - describes use of dotted schema names with the SQL Server dialect. + :ref:`schema_table_reflection` + .. _schema_metadata_schema_name: @@ -438,11 +456,11 @@ to specify that it should not be schema qualified may use the special symbol schema=BLANK_SCHEMA # will not use "remote_banks" ) - .. seealso:: :paramref:`_schema.MetaData.schema` + .. _schema_dynamic_naming_convention: Applying Dynamic Schema Naming Conventions @@ -454,11 +472,11 @@ basis, so that for example in multi-tenant situations, each transaction or statement may be targeted at a specific set of schema names that change. The section :ref:`schema_translating` describes how this feature is used. - .. seealso:: :ref:`schema_translating` + .. _schema_set_default_connections: Setting a Default Schema for New Connections @@ -506,6 +524,17 @@ for specific information regarding how default schemas are configured. :ref:`postgresql_alternate_search_path` - in the :ref:`postgresql_toplevel` dialect documentation. + + + +Schemas and Reflection +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The schema feature of SQLAlchemy interacts with the table reflection +feature introduced at ref:`metadata_reflection_toplevel`. 
See the section +:ref:`metadata_reflection_schemas` for additional details on how this works. + + Backend-Specific Options ------------------------ diff --git a/doc/build/core/reflection.rst b/doc/build/core/reflection.rst index 0660823eb02..fdcbf8c3970 100644 --- a/doc/build/core/reflection.rst +++ b/doc/build/core/reflection.rst @@ -13,7 +13,7 @@ existing within the database. This process is called *reflection*. In the most simple case you need only specify the table name, a :class:`~sqlalchemy.schema.MetaData` object, and the ``autoload_with`` argument:: - >>> messages = Table('messages', meta, autoload_with=engine) + >>> messages = Table('messages', metadata_obj, autoload_with=engine) >>> [c.name for c in messages.columns] ['message_id', 'message_name', 'date'] @@ -30,8 +30,8 @@ Below, assume the table ``shopping_cart_items`` references a table named ``shopping_carts``. Reflecting the ``shopping_cart_items`` table has the effect such that the ``shopping_carts`` table will also be loaded:: - >>> shopping_cart_items = Table('shopping_cart_items', meta, autoload_with=engine) - >>> 'shopping_carts' in meta.tables: + >>> shopping_cart_items = Table('shopping_cart_items', metadata_obj, autoload_with=engine) + >>> 'shopping_carts' in metadata_obj.tables: True The :class:`~sqlalchemy.schema.MetaData` has an interesting "singleton-like" @@ -43,7 +43,7 @@ you the already-existing :class:`~sqlalchemy.schema.Table` object if one already exists with the given name. Such as below, we can access the already generated ``shopping_carts`` table just by naming it:: - shopping_carts = Table('shopping_carts', meta) + shopping_carts = Table('shopping_carts', metadata_obj) Of course, it's a good idea to use ``autoload_with=engine`` with the above table regardless. 
This is so that the table's attributes will be loaded if they have @@ -61,7 +61,7 @@ Individual columns can be overridden with explicit values when reflecting tables; this is handy for specifying custom datatypes, constraints such as primary keys that may not be configured within the database, etc.:: - >>> mytable = Table('mytable', meta, + >>> mytable = Table('mytable', metadata_obj, ... Column('id', Integer, primary_key=True), # override reflected 'id' to have primary key ... Column('mydata', Unicode(50)), # override reflected 'mydata' to be Unicode ... # additional Column objects which require no change are reflected normally @@ -119,6 +119,219 @@ object's dictionary of tables:: for table in reversed(metadata_obj.sorted_tables): someengine.execute(table.delete()) +.. _metadata_reflection_schemas: + +Reflecting Tables from Other Schemas +------------------------------------ + +The section :ref:`schema_table_schema_name` introduces the concept of table +schemas, which are namespaces within a database that contain tables and other +objects, and which can be specified explicitly. The "schema" for a +:class:`_schema.Table` object, as well as for other objects like views, indexes and +sequences, can be set up using the :paramref:`_schema.Table.schema` parameter, +and also as the default schema for a :class:`_schema.MetaData` object using the +:paramref:`_schema.MetaData.schema` parameter. + +The use of this schema parameter directly affects where the table reflection +feature will look when it is asked to reflect objects. 
For example, given
+a :class:`_schema.MetaData` object configured with a default schema name
+"project" via its :paramref:`_schema.MetaData.schema` parameter::
+
+    >>> metadata_obj = MetaData(schema="project")
+
+The :meth:`.MetaData.reflect` method will then utilize that configured ``.schema``
+for reflection::
+
+    >>> # uses `schema` configured in metadata_obj
+    >>> metadata_obj.reflect(someengine)
+
+The end result is that :class:`_schema.Table` objects from the "project"
+schema will be reflected, and they will be populated as schema-qualified
+with that name::
+
+    >>> metadata_obj.tables['project.messages']
+    Table('messages', MetaData(), Column('message_id', INTEGER(), table=<messages>), schema='project')
+
+Similarly, an individual :class:`_schema.Table` object that includes the
+:paramref:`_schema.Table.schema` parameter will also be reflected from that
+database schema, overriding any default schema that may have been configured on the
+owning :class:`_schema.MetaData` collection::
+
+    >>> messages = Table('messages', metadata_obj, schema="project", autoload_with=someengine)
+    >>> messages
+    Table('messages', MetaData(), Column('message_id', INTEGER(), table=<messages>), schema='project')
+
+Finally, the :meth:`_schema.MetaData.reflect` method itself also allows a
+:paramref:`_schema.MetaData.reflect.schema` parameter to be passed, so we
+could also load tables from the "project" schema for a default configured
+:class:`_schema.MetaData` object::
+
+    >>> metadata_obj = MetaData()
+    >>> metadata_obj.reflect(someengine, schema="project")
+
+We can call :meth:`_schema.MetaData.reflect` any number of times with different
+:paramref:`_schema.MetaData.reflect.schema` arguments (or none at all) to continue
+populating the :class:`_schema.MetaData` object with more objects::
+
+    >>> # add tables from the "customer" schema
+    >>> metadata_obj.reflect(someengine, schema="customer")
+    >>> # add tables from the default schema
+    >>> metadata_obj.reflect(someengine)
+
+.. 
_reflection_schema_qualified_interaction:
+
+Interaction of Schema-qualified Reflection with the Default Schema
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. admonition:: Section Best Practices Summarized
+
+    In this section, we discuss SQLAlchemy's reflection behavior regarding
+    tables that are visible in the "default schema" of a database session,
+    and how these interact with SQLAlchemy directives that include the schema
+    explicitly. As a best practice, ensure the "default" schema for a database
+    is just a single name, and not a list of names; for tables that are
+    part of this "default" schema and can be named without schema qualification
+    in DDL and SQL, leave corresponding :paramref:`_schema.Table.schema` and
+    similar schema parameters set to their default of ``None``.
+
+As described at :ref:`schema_metadata_schema_name`, databases that have
+the concept of schemas usually also include the concept of a "default" schema.
+The reason for this is naturally that when one refers to table objects without
+a schema as is common, a schema-capable database will still consider that
+table to be in a "schema" somewhere. Some databases such as PostgreSQL
+take this concept further into the notion of a
+`schema search path
+<https://www.postgresql.org/docs/current/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_
+where *multiple* schema names can be considered in a particular database
+session to be "implicit"; referring to a table name that's in any of those
+schemas will not require that the schema name be present (while at the same time
+it's also perfectly fine if the schema name *is* present).
+
+Since most relational databases therefore have the concept of a particular
+table object which can be referred towards both in a schema-qualified way, as
+well as an "implicit" way where no schema is present, this presents a
+complexity for SQLAlchemy's reflection
+feature. 
Reflecting a table in +a schema-qualified manner will always populate its :attr:`_schema.Table.schema` +attribute and additionally affect how this :class:`_schema.Table` is organized +into the :attr:`_schema.MetaData.tables` collection, that is, in a schema +qualified manner. Conversely, reflecting the **same** table in a non-schema +qualified manner will organize it into the :attr:`_schema.MetaData.tables` +collection **without** being schema qualified. The end result is that there +would be two separate :class:`_schema.Table` objects in the single +:class:`_schema.MetaData` collection representing the same table in the +actual database. + +To illustrate the ramifications of this issue, consider tables from the +"project" schema in the previous example, and suppose also that the "project" +schema is the default schema of our database connection, or if using a database +such as PostgreSQL suppose the "project" schema is set up in the PostgreSQL +``search_path``. This would mean that the database accepts the following +two SQL statements as equivalent:: + + -- schema qualified + SELECT message_id FROM project.messages + + -- non-schema qualified + SELECT message_id FROM messages + +This is not a problem as the table can be found in both ways. However +in SQLAlchemy, it's the **identity** of the :class:`_schema.Table` object +that determines its semantic role within a SQL statement. 
Based on the current +decisions within SQLAlchemy, this means that if we reflect the same "messages" table in +both a schema-qualified as well as a non-schema qualified manner, we get +**two** :class:`_schema.Table` objects that will **not** be treated as +semantically equivalent:: + + >>> # reflect in non-schema qualified fashion + >>> messages_table_1 = Table("messages", metadata_obj, autoload_with=someengine) + >>> # reflect in schema qualified fashion + >>> messages_table_2 = Table("messages", metadata_obj, schema="project", autoload_with=someengine) + >>> # two different objects + >>> messages_table_1 is messages_table_2 + False + >>> # stored in two different ways + >>> metadata.tables["messages"] is messages_table_1 + True + >>> metadata.tables["project.messages"] is messages_table_2 + True + +The above issue becomes more complicated when the tables being reflected contain +foreign key references to other tables. Suppose "messages" has a "project_id" +column which refers to rows in another schema-local table "projects", meaning +there is a :class:`_schema.ForeignKeyConstraint` object that is part of the +definition of the "messages" table. + +We can find ourselves in a situation where one :class:`_schema.MetaData` +collection may contain as many as four :class:`_schema.Table` objects +representing these two database tables, where one or two of the additional +tables were generated by the reflection process; this is because when +the reflection process encounters a foreign key constraint on a table +being reflected, it branches out to reflect that referenced table as well. +The decision making it uses to assign the schema to this referenced +table is that SQLAlchemy will **omit a default schema** from the reflected +:class:`_schema.ForeignKeyConstraint` object if the owning +:class:`_schema.Table` also omits its schema name and also that these two objects +are in the same schema, but will **include** it if +it were not omitted. 
+
+The common scenario is when the reflection of a table in a schema qualified
+fashion then loads a related table that will also be performed in a schema
+qualified fashion::
+
+    >>> # reflect "messages" in a schema qualified fashion
+    >>> messages_table_1 = Table("messages", metadata_obj, schema="project", autoload_with=someengine)
+
+The above ``messages_table_1`` will refer to ``projects`` also in a schema
+qualified fashion. This "projects" table will be reflected automatically by
+the fact that "messages" refers to it::
+
+    >>> messages_table_1.c.project_id
+    Column('project_id', INTEGER(), ForeignKey('project.projects.project_id'), table=<messages>)
+
+If some other part of the code reflects "projects" in a non-schema qualified
+fashion, there are now two projects tables that are not the same::
+
+    >>> # reflect "projects" in a non-schema qualified fashion
+    >>> projects_table_1 = Table("projects", metadata_obj, autoload_with=someengine)
+
+    >>> # messages does not refer to projects_table_1 above
+    >>> messages_table_1.c.project_id.references(projects_table_1.c.project_id)
+    False
+
+    >>> # it refers to this one
+    >>> projects_table_2 = metadata_obj.tables["project.projects"]
+    >>> messages_table_1.c.project_id.references(projects_table_2.c.project_id)
+    True
+
+    >>> # they're different, as one is non-schema qualified and the other one is schema qualified
+    >>> projects_table_1 is projects_table_2
+    False
+
+The above confusion can cause problems within applications that use table
+reflection to load up application-level :class:`_schema.Table` objects, as
+well as within migration scenarios, in particular such as when using Alembic
+Migrations to detect new tables and foreign key constraints.
+
+The above behavior can be remedied by sticking to one simple practice:
+
+* Don't include the :paramref:`_schema.Table.schema` parameter for any
+  :class:`_schema.Table` that expects to be located in the **default** schema
+  of the database. 
+ +For PostgreSQL and other databases that support a "search" path for schemas, +add the following additional practice: + +* Keep the "search path" narrowed down to **one schema only, which is the + default schema**. + + +.. seealso:: + + :ref:`postgresql_schema_reflection` - additional details of this behavior + as regards the PostgreSQL database. + + .. _metadata_reflection_inspector: Fine Grained Reflection with Inspector diff --git a/doc/build/core/type_basics.rst b/doc/build/core/type_basics.rst index b938cc5eee4..3ec50cc0039 100644 --- a/doc/build/core/type_basics.rst +++ b/doc/build/core/type_basics.rst @@ -232,7 +232,7 @@ such as `collation` and `charset`:: from sqlalchemy.dialects.mysql import VARCHAR, TEXT - table = Table('foo', meta, + table = Table('foo', metadata_obj, Column('col1', VARCHAR(200, collation='binary')), Column('col2', TEXT(charset='latin1')) ) diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index c1a2cf81dcf..e58e430e85d 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -273,20 +273,22 @@ def set_search_path(dbapi_connection, connection_record): Remote-Schema Table Introspection and PostgreSQL search_path ------------------------------------------------------------ -**TL;DR;**: keep the ``search_path`` variable set to its default of ``public``, -name schemas **other** than ``public`` explicitly within ``Table`` definitions. - -The PostgreSQL dialect can reflect tables from any schema. The -:paramref:`_schema.Table.schema` argument, or alternatively the -:paramref:`.MetaData.reflect.schema` argument determines which schema will -be searched for the table or tables. The reflected :class:`_schema.Table` -objects -will in all cases retain this ``.schema`` attribute as was specified. 
-However, with regards to tables which these :class:`_schema.Table` -objects refer to -via foreign key constraint, a decision must be made as to how the ``.schema`` -is represented in those remote tables, in the case where that remote -schema name is also a member of the current +.. admonition:: Section Best Practices Summarized + + keep the ``search_path`` variable set to its default of ``public``, without + any other schema names. For other schema names, name these explicitly + within :class:`_schema.Table` definitions. Alternatively, the + ``postgresql_ignore_search_path`` option will cause all reflected + :class:`_schema.Table` objects to have a :attr:`_schema.Table.schema` + attribute set up. + +The PostgreSQL dialect can reflect tables from any schema, as outlined in +:ref:`schema_table_reflection`. + +With regards to tables which these :class:`_schema.Table` +objects refer to via foreign key constraint, a decision must be made as to how +the ``.schema`` is represented in those remote tables, in the case where that +remote schema name is also a member of the current `PostgreSQL search path `_. @@ -349,8 +351,8 @@ def set_search_path(dbapi_connection, connection_record): >>> engine = create_engine("postgresql://scott:tiger@localhost/test") >>> with engine.connect() as conn: ... conn.execute(text("SET search_path TO test_schema, public")) - ... meta = MetaData() - ... referring = Table('referring', meta, + ... metadata_obj = MetaData() + ... referring = Table('referring', metadata_obj, ... autoload_with=conn) ... @@ -359,7 +361,7 @@ def set_search_path(dbapi_connection, connection_record): collection ``referred`` table named **without** the schema:: - >>> meta.tables['referred'].schema is None + >>> metadata_obj.tables['referred'].schema is None True To alter the behavior of reflection such that the referred schema is @@ -370,8 +372,8 @@ def set_search_path(dbapi_connection, connection_record): >>> with engine.connect() as conn: ... 
conn.execute(text("SET search_path TO test_schema, public")) - ... meta = MetaData() - ... referring = Table('referring', meta, + ... metadata_obj = MetaData() + ... referring = Table('referring', metadata_obj, ... autoload_with=conn, ... postgresql_ignore_search_path=True) ... @@ -379,7 +381,7 @@ def set_search_path(dbapi_connection, connection_record): We will now have ``test_schema.referred`` stored as schema-qualified:: - >>> meta.tables['test_schema.referred'].schema + >>> metadata_obj.tables['test_schema.referred'].schema 'test_schema' .. sidebar:: Best Practices for PostgreSQL Schema reflection @@ -401,13 +403,11 @@ def set_search_path(dbapi_connection, connection_record): which is in the ``public`` (i.e. default) schema will always have the ``.schema`` attribute set to ``None``. -.. versionadded:: 0.9.2 Added the ``postgresql_ignore_search_path`` - dialect-level option accepted by :class:`_schema.Table` and - :meth:`_schema.MetaData.reflect`. - - .. seealso:: + :ref:`reflection_schema_qualified_interaction` - discussion of the issue + from a backend-agnostic perspective + `The Schema Search Path `_ - on the PostgreSQL website. 
diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py index 2ee66460e89..83d98a05818 100644 --- a/lib/sqlalchemy/sql/schema.py +++ b/lib/sqlalchemy/sql/schema.py @@ -4897,7 +4897,7 @@ class Computed(FetchedValue, SchemaItem): from sqlalchemy import Computed - Table('square', meta, + Table('square', metadata_obj, Column('side', Float, nullable=False), Column('area', Float, Computed('side * side')) ) @@ -4994,7 +4994,7 @@ class Identity(IdentityOptions, FetchedValue, SchemaItem): from sqlalchemy import Identity - Table('foo', meta, + Table('foo', metadata_obj, Column('id', Integer, Identity()) Column('description', Text), ) diff --git a/lib/sqlalchemy/sql/type_api.py b/lib/sqlalchemy/sql/type_api.py index 2a4688bcceb..0334dfe2d03 100644 --- a/lib/sqlalchemy/sql/type_api.py +++ b/lib/sqlalchemy/sql/type_api.py @@ -843,7 +843,7 @@ def process(value): Once the type is made, it's immediately usable:: - table = Table('foo', meta, + table = Table('foo', metadata_obj, Column('id', Integer, primary_key=True), Column('data', MyType(16)) ) From 4da3e5333868d5ae0b50f40f9540041ff6f6e678 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 14 Nov 2021 20:02:10 -0500 Subject: [PATCH 028/632] handle dunder names in @declared_attr separately Fixed Mypy crash which would occur when using Mypy plugin against code which made use of :class:`_orm.declared_attr` methods for non-mapped names like ``__mapper_args__``, ``__table_args__``, or other dunder names, as the plugin would try to interpret these as mapped attributes which would then be later mis-handled. As part of this change, the decorated function is still converted by the plugin into a generic assignment statement (e.g. ``__mapper_args__: Any``) so that the argument signature can continue to be annotated in the same way one would for any other ``@classmethod`` without Mypy complaining about the wrong argument type for a method that isn't explicitly ``@classmethod``. 
Fixes: #7321 Change-Id: I55656e867876677c5c55143449db371344be8600 (cherry picked from commit 836902bc8438a800d2c9cf1452da31d3ca967f3b) --- doc/build/changelog/unreleased_14/7321.rst | 16 +++++++++++++ lib/sqlalchemy/ext/mypy/decl_class.py | 15 +++++++++++- lib/sqlalchemy/ext/mypy/util.py | 5 ++++ test/ext/mypy/files/issue_7321.py | 21 +++++++++++++++++ test/ext/mypy/files/issue_7321_part2.py | 27 ++++++++++++++++++++++ 5 files changed, 83 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/7321.rst create mode 100644 test/ext/mypy/files/issue_7321.py create mode 100644 test/ext/mypy/files/issue_7321_part2.py diff --git a/doc/build/changelog/unreleased_14/7321.rst b/doc/build/changelog/unreleased_14/7321.rst new file mode 100644 index 00000000000..08cca434481 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7321.rst @@ -0,0 +1,16 @@ +.. change:: + :tags: bug, mypy + :tickets: 7321 + + Fixed Mypy crash which would occur when using Mypy plugin against code + which made use of :class:`_orm.declared_attr` methods for non-mapped names + like ``__mapper_args__``, ``__table_args__``, or other dunder names, as the + plugin would try to interpret these as mapped attributes which would then + be later mis-handled. As part of this change, the decorated function is + still converted by the plugin into a generic assignment statement (e.g. + ``__mapper_args__: Any``) so that the argument signature can continue to be + annotated in the same way one would for any other ``@classmethod`` without + Mypy complaining about the wrong argument type for a method that isn't + explicitly ``@classmethod``. 
+ + diff --git a/lib/sqlalchemy/ext/mypy/decl_class.py b/lib/sqlalchemy/ext/mypy/decl_class.py index b85ec0f699e..0d7462d5bde 100644 --- a/lib/sqlalchemy/ext/mypy/decl_class.py +++ b/lib/sqlalchemy/ext/mypy/decl_class.py @@ -241,7 +241,20 @@ class MyClass: left_hand_explicit_type: Optional[ProperType] = None - if isinstance(stmt.func.type, CallableType): + if util.name_is_dunder(stmt.name): + # for dunder names like __table_args__, __tablename__, + # __mapper_args__ etc., rewrite these as simple assignment + # statements; otherwise mypy doesn't like if the decorated + # function has an annotation like ``cls: Type[Foo]`` because + # it isn't @classmethod + any_ = AnyType(TypeOfAny.special_form) + left_node = NameExpr(stmt.var.name) + left_node.node = stmt.var + new_stmt = AssignmentStmt([left_node], TempNode(any_)) + new_stmt.type = left_node.node.type + cls.defs.body[dec_index] = new_stmt + return + elif isinstance(stmt.func.type, CallableType): func_type = stmt.func.type.ret_type if isinstance(func_type, UnboundType): type_id = names.type_id_for_unbound_type(func_type, cls, api) diff --git a/lib/sqlalchemy/ext/mypy/util.py b/lib/sqlalchemy/ext/mypy/util.py index a3825f175f6..4d55cb72833 100644 --- a/lib/sqlalchemy/ext/mypy/util.py +++ b/lib/sqlalchemy/ext/mypy/util.py @@ -1,3 +1,4 @@ +import re from typing import Any from typing import Iterable from typing import Iterator @@ -82,6 +83,10 @@ def deserialize( return cls(typ=typ, info=info, **data) +def name_is_dunder(name): + return bool(re.match(r"^__.+?__$", name)) + + def _set_info_metadata(info: TypeInfo, key: str, data: Any) -> None: info.metadata.setdefault("sqlalchemy", {})[key] = data diff --git a/test/ext/mypy/files/issue_7321.py b/test/ext/mypy/files/issue_7321.py new file mode 100644 index 00000000000..6a40b9ddaa0 --- /dev/null +++ b/test/ext/mypy/files/issue_7321.py @@ -0,0 +1,21 @@ +from typing import Any + +from sqlalchemy.orm import declarative_base +from sqlalchemy.orm import declared_attr + + +Base 
= declarative_base() + + +class Foo(Base): + @declared_attr + def __tablename__(cls) -> str: + return "name" + + @declared_attr + def __mapper_args__(cls) -> dict[Any, Any]: + return {} + + @declared_attr + def __table_args__(cls) -> dict[Any, Any]: + return {} diff --git a/test/ext/mypy/files/issue_7321_part2.py b/test/ext/mypy/files/issue_7321_part2.py new file mode 100644 index 00000000000..f53add1da9c --- /dev/null +++ b/test/ext/mypy/files/issue_7321_part2.py @@ -0,0 +1,27 @@ +from typing import Any +from typing import Type + +from sqlalchemy.orm import declarative_base +from sqlalchemy.orm import declared_attr + + +Base = declarative_base() + + +class Foo(Base): + # no mypy error emitted regarding the + # Type[Foo] part + @declared_attr + def __tablename__(cls: Type["Foo"]) -> str: + return "name" + + @declared_attr + def __mapper_args__(cls: Type["Foo"]) -> dict[Any, Any]: + return {} + + # this was a workaround that works if there's no plugin present, make + # sure that doesn't crash anything + @classmethod + @declared_attr + def __table_args__(cls: Type["Foo"]) -> dict[Any, Any]: + return {} From ba763a146f450dcd56e4f1072a28a662ef684ac1 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 15 Nov 2021 15:06:06 -0500 Subject: [PATCH 029/632] favor setuptools imports over distutils Python 3.10 has deprecated "distutils" in favor of explicit use of "setuptools" in :pep:`632`; SQLAlchemy's setup.py has replaced imports accordingly. However, since setuptools itself only recently added the replacement symbols mentioned in pep-632 as of November of 2022 in version 59.0.1, ``setup.py`` still has fallback imports to distutils, as SQLAlchemy 1.4 does not have a hard setuptools versioning requirement at this time. SQLAlchemy 2.0 is expected to use a full :pep:`517` installation layout which will indicate appropriate setuptools versioning up front. 
Fixes: #7311 Change-Id: I215ef3c3b226a38266f59d181214aea462c4664d (cherry picked from commit e6c02c33728a581e0df910caa8d96b6e114c114d) --- doc/build/changelog/unreleased_14/7311.rst | 12 ++++++++++++ setup.py | 16 ++++++++++++---- 2 files changed, 24 insertions(+), 4 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7311.rst diff --git a/doc/build/changelog/unreleased_14/7311.rst b/doc/build/changelog/unreleased_14/7311.rst new file mode 100644 index 00000000000..69a522d526e --- /dev/null +++ b/doc/build/changelog/unreleased_14/7311.rst @@ -0,0 +1,12 @@ +.. change:: + :tags: bug, setup + :tickets: 7311 + + Python 3.10 has deprecated "distutils" in favor of explicit use of + "setuptools" in :pep:`632`; SQLAlchemy's setup.py has replaced imports + accordingly. However, since setuptools itself only recently added the + replacement symbols mentioned in pep-632 as of November of 2022 in version + 59.0.1, ``setup.py`` still has fallback imports to distutils, as SQLAlchemy + 1.4 does not have a hard setuptools versioning requirement at this time. + SQLAlchemy 2.0 is expected to use a full :pep:`517` installation layout + which will indicate appropriate setuptools versioning up front. 
diff --git a/setup.py b/setup.py index 55a3cee6f98..f1a1cacba36 100644 --- a/setup.py +++ b/setup.py @@ -1,7 +1,3 @@ -from distutils.command.build_ext import build_ext -from distutils.errors import CCompilerError -from distutils.errors import DistutilsExecError -from distutils.errors import DistutilsPlatformError import os import platform import re @@ -10,8 +6,20 @@ from setuptools import Distribution as _Distribution from setuptools import Extension from setuptools import setup +from setuptools.command.build_ext import build_ext from setuptools.command.test import test as TestCommand +# attempt to use pep-632 imports for setuptools symbols; however, +# since these symbols were only added to setuptools as of 59.0.1, +# fall back to the distutils symbols otherwise +try: + from setuptools.errors import CCompilerError + from setuptools.errors import DistutilsExecError + from setuptools.errors import DistutilsPlatformError +except ImportError: + from distutils.errors import CCompilerError + from distutils.errors import DistutilsExecError + from distutils.errors import DistutilsPlatformError cmdclass = {} From 708ef651fab532c430d4b20dc50e97c377d48d5e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 18 Nov 2021 12:46:25 -0500 Subject: [PATCH 030/632] disable raiseerr for refresh state loader options Fixed ORM regression where the new behavior of "eager loaders run on unexpire" added in :ticket:`1763` would lead to loader option errors being raised inappropriately for the case where a single :class:`_orm.Query` or :class:`_sql.Select` were used to load multiple kinds of entities, along with loader options that apply to just one of those kinds of entity like a :func:`_orm.joinedload`, and later the objects would be refreshed from expiration, where the loader options would attempt to be applied to the mismatched object type and then raise an exception. The check for this mismatch now bypasses raising an error for this case. 
Fixes: #7318 Change-Id: I111e0f3e0fb0447355574cbdcde002f734833490 (cherry picked from commit d4c170238dc95eeb2dc7e6c5e03270fbc8b36f8f) --- doc/build/changelog/unreleased_14/7318.rst | 13 ++++++ lib/sqlalchemy/orm/strategy_options.py | 3 +- test/orm/test_expire.py | 53 ++++++++++++++++++++++ 3 files changed, 68 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/7318.rst diff --git a/doc/build/changelog/unreleased_14/7318.rst b/doc/build/changelog/unreleased_14/7318.rst new file mode 100644 index 00000000000..d60e303c9c7 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7318.rst @@ -0,0 +1,13 @@ +.. change:: + :tags: bug, orm, regression + :tickets: 7318 + + Fixed ORM regression where the new behavior of "eager loaders run on + unexpire" added in :ticket:`1763` would lead to loader option errors being + raised inappropriately for the case where a single :class:`_orm.Query` or + :class:`_sql.Select` were used to load multiple kinds of entities, along + with loader options that apply to just one of those kinds of entity like a + :func:`_orm.joinedload`, and later the objects would be refreshed from + expiration, where the loader options would attempt to be applied to the + mismatched object type and then raise an exception. The check for this + mismatch now bypasses raising an error for this case. 
diff --git a/lib/sqlalchemy/orm/strategy_options.py b/lib/sqlalchemy/orm/strategy_options.py index 675c7218bd6..23a8c453323 100644 --- a/lib/sqlalchemy/orm/strategy_options.py +++ b/lib/sqlalchemy/orm/strategy_options.py @@ -260,7 +260,8 @@ def process_compile_state(self, compile_state): self._process( compile_state, compile_state._lead_mapper_entities, - not bool(compile_state.current_path), + not bool(compile_state.current_path) + and not compile_state.compile_options._for_refresh_state, ) def _process(self, compile_state, mapper_entities, raiseerr): diff --git a/test/orm/test_expire.py b/test/orm/test_expire.py index 5a12a7da4bf..3fba0b446f9 100644 --- a/test/orm/test_expire.py +++ b/test/orm/test_expire.py @@ -25,6 +25,7 @@ from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures +from sqlalchemy.testing.assertions import expect_raises_message from sqlalchemy.testing.assertsql import CountStatements from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.schema import Column @@ -846,6 +847,58 @@ def test_relationship_changes_preserved(self): assert "name" in u.__dict__ assert len(u.addresses) == 2 + @testing.combinations( + (True, False), + (False, False), + (False, True), + ) + def test_skip_options_that_dont_match(self, test_control_case, do_expire): + """test #7318""" + + User, Address, Order = self.classes("User", "Address", "Order") + users, addresses, orders = self.tables("users", "addresses", "orders") + + self.mapper_registry.map_imperatively(Order, orders) + + self.mapper_registry.map_imperatively( + User, + users, + properties={ + "addresses": relationship( + Address, backref="user", lazy="joined" + ), + "orders": relationship(Order), + }, + ) + self.mapper_registry.map_imperatively(Address, addresses) + sess = fixture_session() + + if test_control_case: + # this would be the error we are skipping, make sure it happens + # for up front + with 
expect_raises_message( + sa.exc.ArgumentError, + 'Mapped attribute "User.addresses" does not apply to ' + "any of the root entities in this query", + ): + row = sess.execute( + select(Order).options(joinedload(User.addresses)) + ).first() + else: + stmt = ( + select(User, Order) + .join_from(User, Order) + .options(joinedload(User.addresses)) + .order_by(User.id, Order.id) + ) + + row = sess.execute(stmt).first() + + u1, o1 = row + if do_expire: + sess.expire(o1) + eq_(o1.description, "order 1") + def test_mapper_joinedload_props_load(self): users, Address, addresses, User = ( self.tables.users, From 7e1af9595f05dcb56f6cb24680724bbb68277699 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 18 Nov 2021 13:39:54 -0500 Subject: [PATCH 031/632] use typing.Dict newer Pythons seem to accept ``dict[Any, Any]`` which is why this wasn't noticed. Revise fix for #7321 made in I55656e867876677c5c55143449db371344be8600. Fixes: #7321 Change-Id: Idc22e15d098543e07853f4532cfd1aaae4dd6404 (cherry picked from commit 2fe72b977506562811d3b4dce1c138f0a69f7ad4) --- test/ext/mypy/files/issue_7321.py | 5 +++-- test/ext/mypy/files/issue_7321_part2.py | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/test/ext/mypy/files/issue_7321.py b/test/ext/mypy/files/issue_7321.py index 6a40b9ddaa0..d4cd7f2c435 100644 --- a/test/ext/mypy/files/issue_7321.py +++ b/test/ext/mypy/files/issue_7321.py @@ -1,4 +1,5 @@ from typing import Any +from typing import Dict from sqlalchemy.orm import declarative_base from sqlalchemy.orm import declared_attr @@ -13,9 +14,9 @@ def __tablename__(cls) -> str: return "name" @declared_attr - def __mapper_args__(cls) -> dict[Any, Any]: + def __mapper_args__(cls) -> Dict[Any, Any]: return {} @declared_attr - def __table_args__(cls) -> dict[Any, Any]: + def __table_args__(cls) -> Dict[Any, Any]: return {} diff --git a/test/ext/mypy/files/issue_7321_part2.py b/test/ext/mypy/files/issue_7321_part2.py index f53add1da9c..4227f2797e8 100644 --- 
a/test/ext/mypy/files/issue_7321_part2.py +++ b/test/ext/mypy/files/issue_7321_part2.py @@ -1,4 +1,5 @@ from typing import Any +from typing import Dict from typing import Type from sqlalchemy.orm import declarative_base @@ -16,12 +17,12 @@ def __tablename__(cls: Type["Foo"]) -> str: return "name" @declared_attr - def __mapper_args__(cls: Type["Foo"]) -> dict[Any, Any]: + def __mapper_args__(cls: Type["Foo"]) -> Dict[Any, Any]: return {} # this was a workaround that works if there's no plugin present, make # sure that doesn't crash anything @classmethod @declared_attr - def __table_args__(cls: Type["Foo"]) -> dict[Any, Any]: + def __table_args__(cls: Type["Foo"]) -> Dict[Any, Any]: return {} From fcf36b515350ac5cdf3fd7cf21cce2e5ea4f60f9 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 21 Nov 2021 11:50:27 -0500 Subject: [PATCH 032/632] fix year Change-Id: I56a325fde167501a53b588cc1b69255238ac1dbb (cherry picked from commit 3085f28be58bf4e4a0318b932f1422c49e1af2f1) --- doc/build/changelog/unreleased_14/7311.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/changelog/unreleased_14/7311.rst b/doc/build/changelog/unreleased_14/7311.rst index 69a522d526e..bdf1a45daa8 100644 --- a/doc/build/changelog/unreleased_14/7311.rst +++ b/doc/build/changelog/unreleased_14/7311.rst @@ -5,7 +5,7 @@ Python 3.10 has deprecated "distutils" in favor of explicit use of "setuptools" in :pep:`632`; SQLAlchemy's setup.py has replaced imports accordingly. However, since setuptools itself only recently added the - replacement symbols mentioned in pep-632 as of November of 2022 in version + replacement symbols mentioned in pep-632 as of November of 2021 in version 59.0.1, ``setup.py`` still has fallback imports to distutils, as SQLAlchemy 1.4 does not have a hard setuptools versioning requirement at this time. 
SQLAlchemy 2.0 is expected to use a full :pep:`517` installation layout From c37ef701638e27863a7af768ba803314fd916587 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Wed, 24 Nov 2021 15:11:04 +0100 Subject: [PATCH 033/632] Improve ``ORMExecuteState`` documentation (cherry picked from commit 7c3d3670c68298e88d03bc8f02e01c6a3f7fe42f) --- doc/build/orm/session_api.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/build/orm/session_api.rst b/doc/build/orm/session_api.rst index ada035e957f..2a85c2c926b 100644 --- a/doc/build/orm/session_api.rst +++ b/doc/build/orm/session_api.rst @@ -47,7 +47,6 @@ Session and sessionmaker() .. attribute:: execution_options The complete dictionary of current execution options. - This is a merge of the statement level options with the locally passed execution options. From 30a44a534067be422a22b7bac931c9eb974b7689 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 24 Nov 2021 19:47:57 -0500 Subject: [PATCH 034/632] fix execution options docs local to 1.4 the cherry-pick in c37ef701638e27863a7af768ba8 went in cleanly but failed to accommodate that the spacing here was still wrong in a different way. Change-Id: Ife87fb01aec3eae2ae9635e6951330279f29471b --- doc/build/orm/session_api.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/build/orm/session_api.rst b/doc/build/orm/session_api.rst index 2a85c2c926b..635223aa100 100644 --- a/doc/build/orm/session_api.rst +++ b/doc/build/orm/session_api.rst @@ -46,6 +46,7 @@ Session and sessionmaker() :attr:`_orm.ORMExecuteState.execution_options` .. attribute:: execution_options + The complete dictionary of current execution options. This is a merge of the statement level options with the locally passed execution options. 
From 2d185f516bcf08c97f8902218911c0750b6427a7 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 27 Nov 2021 14:29:00 -0500 Subject: [PATCH 035/632] update migration strategy for dynamic loaders discuss the two current ways for this use case that use 2.0 style querying and introduce that a newer API is likely on the way. Also repair autofunctions for with_parent for 2.0 only. References: #7123 References: #7372 Change-Id: I2ff6cfd780540ee4ee887b61137af7afa1327a9f (cherry picked from commit 29c5fba9ad89e53180f0bd2a026742321093105f) --- doc/build/changelog/migration_20.rst | 64 +++++++++++++++++++++++++++- doc/build/orm/collections.rst | 8 ++-- doc/build/orm/extensions/asyncio.rst | 4 ++ doc/build/orm/queryguide.rst | 2 +- 4 files changed, 71 insertions(+), 7 deletions(-) diff --git a/doc/build/changelog/migration_20.rst b/doc/build/changelog/migration_20.rst index 79e198d09c4..8f35220d890 100644 --- a/doc/build/changelog/migration_20.rst +++ b/doc/build/changelog/migration_20.rst @@ -260,12 +260,12 @@ the SQLAlchemy project itself, the approach taken is as follows: import warnings from sqlalchemy import exc - + # for warnings not included in regex-based filter below, just log warnings.filterwarnings( "always", category=exc.RemovedIn20Warning ) - + # for warnings related to execute() / scalar(), raise for msg in [ r"The (?:Executable|Engine)\.(?:execute|scalar)\(\) function", @@ -1966,6 +1966,66 @@ the :func:`_orm.selectinload` strategy presents a collection-oriented eager loader that is superior in most respects to :func:`_orm.joinedload` and should be preferred. +.. _migration_20_dynamic_loaders: + +Making use of "dynamic" relationship loads without using Query +--------------------------------------------------------------- + +**Synopsis** + +The ``lazy="dynamic"`` relationship loader strategy, discussed at +:ref:`dynamic_relationship`, makes use of the :class:`_query.Query` object +which is legacy in 2.0. 
+ + +**Migration to 2.0** + +This pattern is still under adjustment for SQLAlchemy 2.0, and it is expected +that new APIs will be introduced. In the interim, there are two ways +to achieve 2.0 style querying that's in terms of a specific relationship: + +* Make use of the :attr:`_orm.Query.statement` attribute on an existing + ``lazy="dynamic"`` relationship. We can use methods like + :meth:`_orm.Session.scalars` with the dynamic loader straight away as + follows:: + + + class User(Base): + __tablename__ = 'user' + + posts = relationship(Post, lazy="dynamic") + + jack = session.get(User, 5) + + # filter Jack's blog posts + posts = session.scalars( + jack.posts.statement.where(Post.headline == "this is a post") + ) + +* Use the :func:`_orm.with_parent` function to construct a :func:`_sql.select` + construct directly:: + + from sqlalchemy.orm import with_parent + + jack = session.get(User, 5) + + posts = session.scalars( + select(Post). + where(with_parent(jack, User.posts)). + where(Post.headline == "this is a post") + ) + +**Discussion** + +The original idea was that the :func:`_orm.with_parent` function should be +sufficient, however continuing to make use of special attributes on the +relationship itself remains appealing, and there's no reason a 2.0 style +construct can't be made to work here as well. There will likely be a new +loader strategy name that sets up an API similar to the example above that +uses the ``.statement`` attribute, such as +``jack.posts.select().where(Post.headline == 'headline')``. + +.. _migration_20_session_autocommit: Autocommit mode removed from Session; autobegin support added ------------------------------------------------------------- diff --git a/doc/build/orm/collections.rst b/doc/build/orm/collections.rst index bc98b4b41d8..31db0b26168 100644 --- a/doc/build/orm/collections.rst +++ b/doc/build/orm/collections.rst @@ -32,10 +32,10 @@ loading of child items both at load time as well as deletion time. 
Dynamic Relationship Loaders ---------------------------- -.. note:: This is a legacy feature. Using the :func:`_orm.with_parent` - filter in conjunction with :func:`_sql.select` is the :term:`2.0 style` - method of use. For relationships that shouldn't load, set - :paramref:`_orm.relationship.lazy` to ``noload``. +.. note:: SQLAlchemy 2.0 will have a slightly altered pattern for "dynamic" + loaders that does not rely upon the :class:`_orm.Query` object, which + will be legacy in 2.0. For current migration strategies, + see :ref:`migration_20_dynamic_loaders`. .. note:: This loader is in the general case not compatible with the :ref:`asyncio_toplevel` extension. It can be used with some limitations, as indicated in :ref:`Asyncio dynamic guidelines `. diff --git a/doc/build/orm/extensions/asyncio.rst b/doc/build/orm/extensions/asyncio.rst index fcaf104467c..91c7c53e1e9 100644 --- a/doc/build/orm/extensions/asyncio.rst +++ b/doc/build/orm/extensions/asyncio.rst @@ -343,6 +343,10 @@ Other guidelines include: ) addresses_filter = (await session.scalars(stmt)).all() + .. seealso:: + + :ref:`migration_20_dynamic_loaders` - notes on migration to 2.0 style + .. _session_run_sync: Running Synchronous Methods and Functions under asyncio diff --git a/doc/build/orm/queryguide.rst b/doc/build/orm/queryguide.rst index a10af53ba14..91c0b311980 100644 --- a/doc/build/orm/queryguide.rst +++ b/doc/build/orm/queryguide.rst @@ -1081,4 +1081,4 @@ matching objects locally present in the :class:`_orm.Session`. See the section .. Setup code, not for display >>> conn.close() - ROLLBACK \ No newline at end of file + ROLLBACK From 2da27bcaa79ab88504a47a24b9d3d70a0a1f37ec Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Sat, 27 Nov 2021 09:53:29 +0100 Subject: [PATCH 036/632] adapt pytest plugin to support pytest v7 Implemented support for the test suite to run correctly under Pytest 7. 
Previously, only Pytest 6.x was supported for Python 3, however the version was not pinned on the upper bound in tox.ini. Pytest is not pinned in tox.ini to be lower than version 8 so that SQLAlchemy versions released with the current codebase will be able to be tested under tox without changes to the environment. Much thanks to the Pytest developers for their help with this issue. Change-Id: I3b12166199be2b913ee16e78b3ebbff415654396 (cherry picked from commit 7f84297c9947ff8d52b091aedb2a46035791763c) --- doc/build/changelog/unreleased_14/pytest7.rst | 11 ++ lib/sqlalchemy/testing/asyncio.py | 1 - lib/sqlalchemy/testing/plugin/bootstrap.py | 14 +- lib/sqlalchemy/testing/plugin/plugin_base.py | 2 +- lib/sqlalchemy/testing/plugin/pytestplugin.py | 147 ++++++++++-------- test/base/test_except.py | 1 - test/conftest.py | 2 +- tox.ini | 2 +- 8 files changed, 104 insertions(+), 76 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/pytest7.rst diff --git a/doc/build/changelog/unreleased_14/pytest7.rst b/doc/build/changelog/unreleased_14/pytest7.rst new file mode 100644 index 00000000000..4397626269b --- /dev/null +++ b/doc/build/changelog/unreleased_14/pytest7.rst @@ -0,0 +1,11 @@ +.. change:: + :tags: bug, tests + + Implemented support for the test suite to run correctly under Pytest 7. + Previously, only Pytest 6.x was supported for Python 3, however the version + was not pinned on the upper bound in tox.ini. Pytest is not pinned in + tox.ini to be lower than version 8 so that SQLAlchemy versions released + with the current codebase will be able to be tested under tox without + changes to the environment. Much thanks to the Pytest developers for + their help with this issue. 
+ diff --git a/lib/sqlalchemy/testing/asyncio.py b/lib/sqlalchemy/testing/asyncio.py index 877d1eb94bf..b964ac57ceb 100644 --- a/lib/sqlalchemy/testing/asyncio.py +++ b/lib/sqlalchemy/testing/asyncio.py @@ -63,7 +63,6 @@ def _maybe_async_provisioning(fn, *args, **kwargs): """ if not ENABLE_ASYNCIO: - return fn(*args, **kwargs) if config.any_async: diff --git a/lib/sqlalchemy/testing/plugin/bootstrap.py b/lib/sqlalchemy/testing/plugin/bootstrap.py index b4691c57d42..6721f485fef 100644 --- a/lib/sqlalchemy/testing/plugin/bootstrap.py +++ b/lib/sqlalchemy/testing/plugin/bootstrap.py @@ -12,8 +12,6 @@ SQLAlchemy/Alembic themselves without the need to ship/install a separate package outside of SQLAlchemy. -NOTE: copied/adapted from SQLAlchemy main for backwards compatibility; -this should be removable when Alembic targets SQLAlchemy 1.0.0. """ @@ -27,14 +25,20 @@ def load_file_as_module(name): path = os.path.join(os.path.dirname(bootstrap_file), "%s.py" % name) - if sys.version_info >= (3, 3): - from importlib import machinery - mod = machinery.SourceFileLoader(name, path).load_module() + if sys.version_info >= (3, 5): + import importlib.util + + spec = importlib.util.spec_from_file_location(name, path) + assert spec is not None + assert spec.loader is not None + mod = importlib.util.module_from_spec(spec) + spec.loader.exec_module(mod) else: import imp mod = imp.load_source(name, path) + return mod diff --git a/lib/sqlalchemy/testing/plugin/plugin_base.py b/lib/sqlalchemy/testing/plugin/plugin_base.py index d2e4a0f690a..36390c590a7 100644 --- a/lib/sqlalchemy/testing/plugin/plugin_base.py +++ b/lib/sqlalchemy/testing/plugin/plugin_base.py @@ -101,7 +101,7 @@ def setup_options(make_option): make_option( "--dbdriver", action="append", - type="string", + type=str, dest="dbdriver", help="Additional database drivers to include in tests. 
" "These are linked to the existing database URLs by the " diff --git a/lib/sqlalchemy/testing/plugin/pytestplugin.py b/lib/sqlalchemy/testing/plugin/pytestplugin.py index 6c6287060b7..41e68307063 100644 --- a/lib/sqlalchemy/testing/plugin/pytestplugin.py +++ b/lib/sqlalchemy/testing/plugin/pytestplugin.py @@ -205,27 +205,36 @@ def pytest_collection_modifyitems(session, config, items): items[:] = [ item for item in items - if isinstance(item.parent, pytest.Instance) - and not item.parent.parent.name.startswith("_") + if item.getparent(pytest.Class) is not None + and not item.getparent(pytest.Class).name.startswith("_") ] - test_classes = set(item.parent for item in items) + test_classes = set(item.getparent(pytest.Class) for item in items) + + def collect(element): + for inst_or_fn in element.collect(): + if isinstance(inst_or_fn, pytest.Collector): + # no yield from in 2.7 + for el in collect(inst_or_fn): + yield el + else: + yield inst_or_fn def setup_test_classes(): for test_class in test_classes: for sub_cls in plugin_base.generate_sub_tests( - test_class.cls, test_class.parent.module + test_class.cls, test_class.module ): if sub_cls is not test_class.cls: per_cls_dict = rebuilt_items[test_class.cls] # support pytest 5.4.0 and above pytest.Class.from_parent ctor = getattr(pytest.Class, "from_parent", pytest.Class) - for inst in ctor( - name=sub_cls.__name__, parent=test_class.parent.parent - ).collect(): - for t in inst.collect(): - per_cls_dict[t.name].append(t) + module = test_class.getparent(pytest.Module) + for fn in collect( + ctor(name=sub_cls.__name__, parent=module) + ): + per_cls_dict[fn.name].append(fn) # class requirements will sometimes need to access the DB to check # capabilities, so need to do this for async @@ -233,8 +242,9 @@ def setup_test_classes(): newitems = [] for item in items: - if item.parent.cls in rebuilt_items: - newitems.extend(rebuilt_items[item.parent.cls][item.name]) + cls_ = item.cls + if cls_ in rebuilt_items: + 
newitems.extend(rebuilt_items[cls_][item.name]) else: newitems.append(item) @@ -247,8 +257,8 @@ def setup_test_classes(): items[:] = sorted( newitems, key=lambda item: ( - item.parent.parent.parent.name, - item.parent.parent.name, + item.getparent(pytest.Module).name, + item.getparent(pytest.Class).name, item.name, ), ) @@ -268,7 +278,7 @@ def pytest_pycollect_makeitem(collector, name, obj): ] elif ( inspect.isfunction(obj) - and isinstance(collector, pytest.Instance) + and collector.cls is not None and plugin_base.want_method(collector.cls, obj) ): # None means, fall back to default logic, which includes @@ -358,10 +368,6 @@ def _parametrize_cls(module, cls): def pytest_runtest_setup(item): from sqlalchemy.testing import asyncio - from sqlalchemy.util import string_types - - if not isinstance(item, pytest.Function): - return # pytest_runtest_setup runs *before* pytest fixtures with scope="class". # plugin_base.start_test_class_outside_fixtures may opt to raise SkipTest @@ -371,48 +377,66 @@ def pytest_runtest_setup(item): global _current_class - if _current_class is None: + if isinstance(item, pytest.Function) and _current_class is None: asyncio._maybe_async_provisioning( plugin_base.start_test_class_outside_fixtures, - item.parent.parent.cls, + item.cls, ) - _current_class = item.parent.parent + _current_class = item.getparent(pytest.Class) - def finalize(): - global _current_class, _current_report - _current_class = None - try: - asyncio._maybe_async_provisioning( - plugin_base.stop_test_class_outside_fixtures, - item.parent.parent.cls, - ) - except Exception as e: - # in case of an exception during teardown attach the original - # error to the exception message, otherwise it will get lost - if _current_report.failed: - if not e.args: - e.args = ( - "__Original test failure__:\n" - + _current_report.longreprtext, - ) - elif e.args[-1] and isinstance(e.args[-1], string_types): - args = list(e.args) - args[-1] += ( - "\n__Original test failure__:\n" - + 
_current_report.longreprtext - ) - e.args = tuple(args) - else: - e.args += ( - "__Original test failure__", - _current_report.longreprtext, - ) - raise - finally: - _current_report = None - - item.parent.parent.addfinalizer(finalize) +@pytest.hookimpl(hookwrapper=True) +def pytest_runtest_teardown(item, nextitem): + # runs inside of pytest function fixture scope + # after test function runs + from sqlalchemy.testing import asyncio + from sqlalchemy.util import string_types + + asyncio._maybe_async(plugin_base.after_test, item) + + yield + # this is now after all the fixture teardown have run, the class can be + # finalized. Since pytest v7 this finalizer can no longer be added in + # pytest_runtest_setup since the class has not yet been setup at that + # time. + # See https://github.com/pytest-dev/pytest/issues/9343 + global _current_class, _current_report + + if _current_class is not None and ( + # last test or a new class + nextitem is None + or nextitem.getparent(pytest.Class) is not _current_class + ): + _current_class = None + + try: + asyncio._maybe_async_provisioning( + plugin_base.stop_test_class_outside_fixtures, item.cls + ) + except Exception as e: + # in case of an exception during teardown attach the original + # error to the exception message, otherwise it will get lost + if _current_report.failed: + if not e.args: + e.args = ( + "__Original test failure__:\n" + + _current_report.longreprtext, + ) + elif e.args[-1] and isinstance(e.args[-1], string_types): + args = list(e.args) + args[-1] += ( + "\n__Original test failure__:\n" + + _current_report.longreprtext + ) + e.args = tuple(args) + else: + e.args += ( + "__Original test failure__", + _current_report.longreprtext, + ) + raise + finally: + _current_report = None def pytest_runtest_call(item): @@ -424,8 +448,8 @@ def pytest_runtest_call(item): asyncio._maybe_async( plugin_base.before_test, item, - item.parent.module.__name__, - item.parent.cls, + item.module.__name__, + item.cls, item.name, ) @@ 
-439,15 +463,6 @@ def pytest_runtest_logreport(report): _current_report = report -def pytest_runtest_teardown(item, nextitem): - # runs inside of pytest function fixture scope - # after test function runs - - from sqlalchemy.testing import asyncio - - asyncio._maybe_async(plugin_base.after_test, item) - - @pytest.fixture(scope="class") def setup_class_methods(request): from sqlalchemy.testing import asyncio diff --git a/test/base/test_except.py b/test/base/test_except.py index 767fd233c01..d464aa7d747 100644 --- a/test/base/test_except.py +++ b/test/base/test_except.py @@ -538,7 +538,6 @@ def make_combinations(): for cls_list, callable_list in ALL_EXC: unroll.extend(product(cls_list, callable_list)) - print(unroll) return combinations_list(unroll) @make_combinations() diff --git a/test/conftest.py b/test/conftest.py index 0db4486a92f..c1c6c8c21be 100755 --- a/test/conftest.py +++ b/test/conftest.py @@ -51,4 +51,4 @@ code = compile(f.read(), "bootstrap.py", "exec") to_bootstrap = "pytest" exec(code, globals(), locals()) - from pytestplugin import * # noqa + from sqla_pytestplugin import * # noqa diff --git a/tox.ini b/tox.ini index d8ba67a440c..0483ea759f2 100644 --- a/tox.ini +++ b/tox.ini @@ -17,7 +17,7 @@ usedevelop= deps= pytest>=4.6.11,<5.0; python_version < '3' - pytest>=6.2; python_version >= '3' + pytest>=6.2,<8; python_version >= '3' pytest-xdist mock; python_version < '3.3' From 27aed887e233189e84856cd7f42fc385c644f0eb Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 29 Nov 2021 17:00:21 -0500 Subject: [PATCH 037/632] document get_bind() overrides for async session Change-Id: Ifcc936a5861d49857d1f365416190cfbd0981aac References: #7383 (cherry picked from commit 250725ab7bbeafbed650f8ecb286ffad01078801) --- lib/sqlalchemy/ext/asyncio/session.py | 79 ++++++++++++++++++++++++++- 1 file changed, 78 insertions(+), 1 deletion(-) diff --git a/lib/sqlalchemy/ext/asyncio/session.py b/lib/sqlalchemy/ext/asyncio/session.py index d2c96905617..96131926917 100644 
--- a/lib/sqlalchemy/ext/asyncio/session.py +++ b/lib/sqlalchemy/ext/asyncio/session.py @@ -32,7 +32,6 @@ "expire_all", "expunge", "expunge_all", - "get_bind", "is_modified", "in_transaction", "in_nested_transaction", @@ -430,6 +429,84 @@ def get_nested_transaction(self): else: return None + def get_bind(self, mapper=None, clause=None, bind=None, **kw): + """Return a "bind" to which the synchronous proxied :class:`_orm.Session` + is bound. + + Unlike the :meth:`_orm.Session.get_bind` method, this method is + currently **not** used by this :class:`.AsyncSession` in any way + in order to resolve engines for requests. + + .. note:: + + This method proxies directly to the :meth:`_orm.Session.get_bind` + method, however is currently **not** useful as an override target, + in contrast to that of the :meth:`_orm.Session.get_bind` method. + The example below illustrates how to implement custom + :meth:`_orm.Session.get_bind` schemes that work with + :class:`.AsyncSession` and :class:`.AsyncEngine`. + + The pattern introduced at :ref:`session_custom_partitioning` + illustrates how to apply a custom bind-lookup scheme to a + :class:`_orm.Session` given a set of :class:`_engine.Engine` objects. + To apply a corresponding :meth:`_orm.Session.get_bind` implementation + for use with a :class:`.AsyncSession` and :class:`.AsyncEngine` + objects, continue to subclass :class:`_orm.Session` and apply it to + :class:`.AsyncSession` using + :paramref:`.AsyncSession.sync_session_class`. 
The inner method must + continue to return :class:`_engine.Engine` instances, which can be + acquired from a :class:`_asyncio.AsyncEngine` using the + :attr:`_asyncio.AsyncEngine.sync_engine` attribute:: + + # using example from "Custom Vertical Partitioning" + + + import random + + from sqlalchemy.ext.asyncio import AsyncSession + from sqlalchemy.ext.asyncio import create_async_engine + from sqlalchemy.orm import Session, sessionmaker + + # construct async engines w/ async drivers + engines = { + 'leader':create_async_engine("sqlite+aiosqlite:///leader.db"), + 'other':create_async_engine("sqlite+aiosqlite:///other.db"), + 'follower1':create_async_engine("sqlite+aiosqlite:///follower1.db"), + 'follower2':create_async_engine("sqlite+aiosqlite:///follower2.db"), + } + + class RoutingSession(Session): + def get_bind(self, mapper=None, clause=None, **kw): + # within get_bind(), return sync engines + if mapper and issubclass(mapper.class_, MyOtherClass): + return engines['other'].sync_engine + elif self._flushing or isinstance(clause, (Update, Delete)): + return engines['leader'].sync_engine + else: + return engines[ + random.choice(['follower1','follower2']) + ].sync_engine + + # apply to AsyncSession using sync_session_class + AsyncSessionMaker = sessionmaker( + class_=AsyncSession, + sync_session_class=RoutingSession + ) + + The :meth:`_orm.Session.get_bind` method is called in a non-asyncio, + implicitly non-blocking context in the same manner as ORM event hooks + and functions that are invoked via :meth:`.AsyncSession.run_sync`, so + routines that wish to run SQL commands inside of + :meth:`_orm.Session.get_bind` can continue to do so using + blocking-style code, which will be translated to implicitly async calls + at the point of invoking IO on the database drivers. 
+ + """ # noqa E501 + + return self.sync_session.get_bind( + mapper=mapper, clause=clause, bind=bind, **kw + ) + async def connection(self, **kw): r"""Return a :class:`_asyncio.AsyncConnection` object corresponding to this :class:`.Session` object's transactional state. From 1f89983b71f30e8cc69912b2b4f4c7a6ec738fdd Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 30 Nov 2021 09:06:29 -0500 Subject: [PATCH 038/632] scoped_session adjustments * fix typo, change to a note the async scoped session note * more dragons re: threading.local() Change-Id: I76266507510e4014456d992656f4aadf6d03ba4a (cherry picked from commit 912bdcc8075c640a4f3d9e0194f57b5bdcfc6f2b) --- doc/build/orm/contextual.rst | 17 +++++++++++++++++ lib/sqlalchemy/orm/scoping.py | 8 ++++---- 2 files changed, 21 insertions(+), 4 deletions(-) diff --git a/doc/build/orm/contextual.rst b/doc/build/orm/contextual.rst index 2e4dbd93b08..eafdee42766 100644 --- a/doc/build/orm/contextual.rst +++ b/doc/build/orm/contextual.rst @@ -19,6 +19,21 @@ The object is the :class:`.scoped_session` object, and it represents a registry pattern, a good introduction can be found in `Patterns of Enterprise Architecture `_. +.. warning:: + + The :class:`.scoped_session` registry by default uses a Python + `threading.local() `_ + in order to track :class:`_orm.Session` instances. **This is not + necessarily compatible with all application servers**, particularly those + which make use of greenlets or other alternative forms of concurrency + control, which may lead to race conditions (e.g. randomly occurring + failures) when used in moderate to high concurrency scenarios. + Please read :ref:`unitofwork_contextual_threadlocal` and + :ref:`session_lifespan` below to more fully understand the implications + of using ``threading.local()`` to track :class:`_orm.Session` objects + and consider more explicit means of scoping when using application servers + which are not based on traditional threads. + .. 
note:: The :class:`.scoped_session` object is a very popular and useful object @@ -103,6 +118,8 @@ underlying :class:`.Session` being maintained by the registry:: The above code accomplishes the same task as that of acquiring the current :class:`.Session` by calling upon the registry, then using that :class:`.Session`. +.. _unitofwork_contextual_threadlocal: + Thread-Local Scope ------------------ diff --git a/lib/sqlalchemy/orm/scoping.py b/lib/sqlalchemy/orm/scoping.py index df3012df1e4..7b228945317 100644 --- a/lib/sqlalchemy/orm/scoping.py +++ b/lib/sqlalchemy/orm/scoping.py @@ -125,11 +125,11 @@ class scoped_session(ScopedSessionMixin): See :ref:`unitofwork_contextual` for a tutorial. - ..warning:: + .. note:: - When using :ref:`asyncio_toplevel` the async - version :class:`_asyncio.async_scoped_session` should be - used instead. + When using :ref:`asyncio_toplevel`, the async-compatible + :class:`_asyncio.async_scoped_session` class should be + used in place of :class:`.scoped_session`. """ From 3d4e64b8ef666ea4760c8234ba475b12739660ce Mon Sep 17 00:00:00 2001 From: Kai Mueller <15907922+kasium@users.noreply.github.com> Date: Wed, 1 Dec 2021 10:58:40 -0500 Subject: [PATCH 039/632] Add __class_getitem__ to the declarative Base class Fixed issue where the :func:`_orm.as_declarative` decorator and similar functions used to generate the declarative base class would not copy the ``__class_getitem__()`` method from a given superclass, which prevented the use of pep-484 generics in conjunction with the ``Base`` class. Pull request courtesy Kai Mueller. 
Fixes: #7368 Closes: #7381 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/7381 Pull-request-sha: 7db7fd869a6bb617f767fad5b71ddf7cb5f14ff5 Change-Id: I495718c3082ad6dd2c83fdbf6feba7c529e351cb (cherry picked from commit 20fe2a3dc2597b04338e8907883c6f4ea72518c8) --- doc/build/changelog/unreleased_14/7368.rst | 9 ++++++++ lib/sqlalchemy/orm/decl_api.py | 2 ++ test/orm/declarative/test_typing_py3k.py | 25 ++++++++++++++++++++++ 3 files changed, 36 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7368.rst create mode 100644 test/orm/declarative/test_typing_py3k.py diff --git a/doc/build/changelog/unreleased_14/7368.rst b/doc/build/changelog/unreleased_14/7368.rst new file mode 100644 index 00000000000..d4415ffec8f --- /dev/null +++ b/doc/build/changelog/unreleased_14/7368.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, orm, mypy + :tickets: 7368 + + Fixed issue where the :func:`_orm.as_declarative` decorator and similar + functions used to generate the declarative base class would not copy the + ``__class_getitem__()`` method from a given superclass, which prevented the + use of pep-484 generics in conjunction with the ``Base`` class. Pull + request courtesy Kai Mueller. 
diff --git a/lib/sqlalchemy/orm/decl_api.py b/lib/sqlalchemy/orm/decl_api.py index 94cda236d15..b5bfb0380fc 100644 --- a/lib/sqlalchemy/orm/decl_api.py +++ b/lib/sqlalchemy/orm/decl_api.py @@ -809,6 +809,8 @@ class Base(metaclass=DeclarativeMeta): class_dict["__abstract__"] = True if mapper: class_dict["__mapper_cls__"] = mapper + if hasattr(cls, "__class_getitem__"): + class_dict["__class_getitem__"] = cls.__class_getitem__ return metaclass(name, bases, class_dict) diff --git a/test/orm/declarative/test_typing_py3k.py b/test/orm/declarative/test_typing_py3k.py new file mode 100644 index 00000000000..823fe54f106 --- /dev/null +++ b/test/orm/declarative/test_typing_py3k.py @@ -0,0 +1,25 @@ +from typing import Generic +from typing import Type +from typing import TypeVar + +from sqlalchemy import Column +from sqlalchemy import Integer +from sqlalchemy.orm import as_declarative +from sqlalchemy.testing import fixtures + + +class DeclarativeBaseTest(fixtures.TestBase): + def test_class_getitem(self): + T = TypeVar("T", bound="CommonBase") # noqa + + class CommonBase(Generic[T]): + @classmethod + def boring(cls: Type[T]) -> Type[T]: + return cls + + @as_declarative() + class Base(CommonBase[T]): + pass + + class Tab(Base["Tab"]): + a = Column(Integer, primary_key=True) From 76e978c970dad5d4eda49d279fdf0876e4f307e9 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 1 Dec 2021 21:39:59 -0500 Subject: [PATCH 040/632] copy list for __iadd__ Fixed issue where a list mapped with :func:`_orm.relationship` would go into an endless loop if in-place added to itself, i.e. the ``+=`` operator were used, as well as if ``.extend()`` were given the same list. 
Fixes: #7389 Change-Id: Idd5118420f8bc684d1ee36b2b6d4c5812f36cc4c (cherry picked from commit 9fc17513fe69a1fd26fc522f9862af3e0ebfd2c4) --- doc/build/changelog/unreleased_14/7389.rst | 8 ++++++++ lib/sqlalchemy/orm/collections.py | 4 ++-- test/orm/test_collection.py | 13 +++++++++++++ 3 files changed, 23 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7389.rst diff --git a/doc/build/changelog/unreleased_14/7389.rst b/doc/build/changelog/unreleased_14/7389.rst new file mode 100644 index 00000000000..887193c2895 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7389.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: orm, bug + :tickets: 7389 + + Fixed issue where a list mapped with :func:`_orm.relationship` would go + into an endless loop if in-place added to itself, i.e. the ``+=`` operator + were used, as well as if ``.extend()`` were given the same list. + diff --git a/lib/sqlalchemy/orm/collections.py b/lib/sqlalchemy/orm/collections.py index ec4d00cb0b3..351069f9a5d 100644 --- a/lib/sqlalchemy/orm/collections.py +++ b/lib/sqlalchemy/orm/collections.py @@ -1273,7 +1273,7 @@ def __delslice__(self, start, end): def extend(fn): def extend(self, iterable): - for value in iterable: + for value in list(iterable): self.append(value) _tidy(extend) @@ -1283,7 +1283,7 @@ def __iadd__(fn): def __iadd__(self, iterable): # list.__iadd__ takes any iterable and seems to let TypeError # raise as-is instead of returning NotImplemented - for value in iterable: + for value in list(iterable): self.append(value) return self diff --git a/test/orm/test_collection.py b/test/orm/test_collection.py index 3473dcf50bf..c1ef16c6535 100644 --- a/test/orm/test_collection.py +++ b/test/orm/test_collection.py @@ -2504,6 +2504,19 @@ class Child(object): assert control == p.children assert control == list(p.children) + # test #7389 + if hasattr(p.children, "__iadd__"): + control += control + p.children += p.children + assert control == list(p.children) + + control[:] = 
[o] + p.children[:] = [o] + if hasattr(p.children, "extend"): + control.extend(control) + p.children.extend(p.children) + assert control == list(p.children) + def test_custom(self): someothertable, sometable = ( self.tables.someothertable, From adcd2b3dce54f64fd173514beb2de59d548f5bed Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 2 Dec 2021 16:07:51 -0500 Subject: [PATCH 041/632] block class_getitem test for python 3.6 This commit in 3d4e64b8ef6 wont work on python 3.6. Change-Id: I8e39268914ee972247e6cf54f6c07366e0033133 --- test/orm/declarative/test_typing_py3k.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/orm/declarative/test_typing_py3k.py b/test/orm/declarative/test_typing_py3k.py index 823fe54f106..7cd70616b75 100644 --- a/test/orm/declarative/test_typing_py3k.py +++ b/test/orm/declarative/test_typing_py3k.py @@ -9,6 +9,8 @@ class DeclarativeBaseTest(fixtures.TestBase): + __requires__ = ("python37",) + def test_class_getitem(self): T = TypeVar("T", bound="CommonBase") # noqa From 70d355358d8a32993b2829a87524ff64a9a39055 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Sat, 4 Dec 2021 23:08:05 +0100 Subject: [PATCH 042/632] The where method of exists now accepts multiple cluase. Support multiple clause elements in the :meth:`_sql.Exists.where` method, unifying the api with the on presented by a normal :func:`_sql.select` construct. 
Fixes: #7386 Change-Id: I5df20478008cd5167053d357cbfad8a641c62b44 (cherry picked from commit b2bc0c8e4138ccef4834a415f7be9012e1c6286e) --- doc/build/changelog/unreleased_14/7386.rst | 7 +++++++ lib/sqlalchemy/sql/selectable.py | 10 ++++++++-- test/sql/test_compiler.py | 8 ++++++++ 3 files changed, 23 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7386.rst diff --git a/doc/build/changelog/unreleased_14/7386.rst b/doc/build/changelog/unreleased_14/7386.rst new file mode 100644 index 00000000000..e344453c8c4 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7386.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: usecase, sql + :tickets: 7386 + + Support multiple clause elements in the :meth:`_sql.Exists.where` method, + unifying the api with the on presented by a normal :func:`_sql.select` + construct. diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index 95fca267c65..587d5593501 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -6597,6 +6597,9 @@ class Exists(UnaryExpression): See :func:`_sql.exists` for a description of usage. + An ``EXISTS`` clase can also be construed from a :func:`_sql.select` + instance by calling :meth:`_sql.SelectBase.exists`. + """ _from_objects = [] @@ -6635,6 +6638,9 @@ def __init__(self, *args, **kwargs): :ref:`tutorial_exists` - in the :term:`2.0 style` tutorial. + :meth:`_sql.SelectBase.exists` - method to transform a ``SELECT`` to an + ``EXISTS`` clause. + """ # noqa E501 if args and isinstance(args[0], (SelectBase, ScalarSelect)): s = args[0] @@ -6749,7 +6755,7 @@ def select_from(self, *froms): e.element = self._regroup(lambda element: element.select_from(*froms)) return e - def where(self, clause): + def where(self, *clause): """Return a new :func:`_expression.exists` construct with the given expression added to its WHERE clause, joined to the existing clause via AND, if any. 
@@ -6762,7 +6768,7 @@ def where(self, clause): """ e = self._clone() - e.element = self._regroup(lambda element: element.where(clause)) + e.element = self._regroup(lambda element: element.where(*clause)) return e diff --git a/test/sql/test_compiler.py b/test/sql/test_compiler.py index 23cf4eca3ab..eeb102162d6 100644 --- a/test/sql/test_compiler.py +++ b/test/sql/test_compiler.py @@ -1222,6 +1222,14 @@ def test_exists(self): "SELECT NOT (NOT (EXISTS (SELECT 1))) AS anon_1", ) + self.assert_compile( + exists(42) + .select_from(table1) + .where(table1.c.name == "foo", table1.c.description == "bar"), + "EXISTS (SELECT 42 FROM mytable WHERE mytable.name = :name_1 " + "AND mytable.description = :description_1)", + ) + def test_exists_method(self): subq = ( select(func.count(table2.c.otherid)) From 9eefda17ec4178f55786401bff3538a178a29707 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Sun, 5 Dec 2021 22:14:51 +0100 Subject: [PATCH 043/632] fix typo in exists documentation Change-Id: I71773e32ae69bad10642f2f3c7b73a80045e7b8b --- lib/sqlalchemy/sql/selectable.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index 587d5593501..6004349ac70 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -6597,7 +6597,7 @@ class Exists(UnaryExpression): See :func:`_sql.exists` for a description of usage. - An ``EXISTS`` clase can also be construed from a :func:`_sql.select` + An ``EXISTS`` clase can also be constructed from a :func:`_sql.select` instance by calling :meth:`_sql.SelectBase.exists`. """ From 6c400f300dbcc4cb49beb15136d1d364d835f1be Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 6 Dec 2021 12:41:36 -0500 Subject: [PATCH 044/632] qualify the stringification warning the recipe which uses render_postcompile itself is not insecure as it still renders bound parameters and does not stringify any literal values. 
Change-Id: Ib5ac2f7ce37dc1415a67b117a9c31c0ee37270b3 (cherry picked from commit 973b6ccbf1f81d2357d310ed44717fe37df96ac6) --- doc/build/faq/sqlexpressions.rst | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/doc/build/faq/sqlexpressions.rst b/doc/build/faq/sqlexpressions.rst index cc629f4cc07..5dcf3e96ad2 100644 --- a/doc/build/faq/sqlexpressions.rst +++ b/doc/build/faq/sqlexpressions.rst @@ -338,7 +338,9 @@ in the same way, such as SQLite's positional form:: .. warning:: - Remember, all of the above code recipes are **only to be used when**: + Remember, **all** of the above code recipes which stringify literal + values, bypassing the use of bound parameters when sending statements + to the database, are **only to be used when**: 1. the use is **debugging purposes only** @@ -346,7 +348,7 @@ in the same way, such as SQLite's positional form:: 3. only with **local, trusted input** - The above recipes for stringification of parameters are **not secure in + The above recipes for stringification of literal values are **not secure in any way and should never be used against production databases**. .. _faq_sql_expression_percent_signs: From 7b0f5563e924fefee10a373d8a37870f7daa618a Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 1 Dec 2021 19:27:25 -0500 Subject: [PATCH 045/632] contextmanager skips rollback if trans says to skip it Fixed issue where if an exception occurred when the :class:`_orm.Session` were to close the connection within the :meth:`_orm.Session.commit` method, when using a context manager for :meth:`_orm.Session.begin` , it would attempt a rollback which would not be possible as the :class:`_orm.Session` was in between where the transaction is committed and the connection is then to be returned to the pool, raising the exception "this sessiontransaction is in the committed state". This exception can occur mostly in an asyncio context where CancelledError can be raised. 
Fixes: #7388 Change-Id: I1a85a3a7eae79f3553ddf1e3d245a0d90b0a2f40 (cherry picked from commit a845da8b0fc5bb172e278c399a1de9a2e49d62af) --- doc/build/changelog/unreleased_14/7388.rst | 13 +++++++ lib/sqlalchemy/engine/base.py | 7 ++++ lib/sqlalchemy/engine/util.py | 23 +++++++++++-- lib/sqlalchemy/orm/session.py | 3 ++ test/engine/test_transaction.py | 30 ++++++++++++++++ test/orm/test_transaction.py | 40 ++++++++++++++++++++++ 6 files changed, 114 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7388.rst diff --git a/doc/build/changelog/unreleased_14/7388.rst b/doc/build/changelog/unreleased_14/7388.rst new file mode 100644 index 00000000000..1c7775a3419 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7388.rst @@ -0,0 +1,13 @@ +.. change:: + :tags: bug, orm + :tickets: 7388 + + Fixed issue where if an exception occurred when the :class:`_orm.Session` + were to close the connection within the :meth:`_orm.Session.commit` method, + when using a context manager for :meth:`_orm.Session.begin` , it would + attempt a rollback which would not be possible as the :class:`_orm.Session` + was in between where the transaction is committed and the connection is + then to be returned to the pool, raising the exception "this + sessiontransaction is in the committed state". This exception can occur + mostly in an asyncio context where CancelledError can be raised. + diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index 0c27ea6d914..a5d973a2c2a 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -2371,6 +2371,13 @@ def _transaction_is_active(self): def _transaction_is_closed(self): return not self._deactivated_from_connection + def _rollback_can_be_called(self): + # for RootTransaction / NestedTransaction, it's safe to call + # rollback() even if the transaction is deactive and no warnings + # will be emitted. 
tested in + # test_transaction.py -> test_no_rollback_in_deactive(?:_savepoint)? + return True + class MarkerTransaction(Transaction): """A 'marker' transaction that is used for nested begin() calls. diff --git a/lib/sqlalchemy/engine/util.py b/lib/sqlalchemy/engine/util.py index 8eb0f182085..660ffafa0a2 100644 --- a/lib/sqlalchemy/engine/util.py +++ b/lib/sqlalchemy/engine/util.py @@ -171,6 +171,23 @@ def _transaction_is_active(self): def _transaction_is_closed(self): raise NotImplementedError() + def _rollback_can_be_called(self): + """indicates the object is in a state that is known to be acceptable + for rollback() to be called. + + This does not necessarily mean rollback() will succeed or not raise + an error, just that there is currently no state detected that indicates + rollback() would fail or emit warnings. + + It also does not mean that there's a transaction in progress, as + it is usually safe to call rollback() even if no transaction is + present. + + .. versionadded:: 1.4.28 + + """ + raise NotImplementedError() + def _get_subject(self): raise NotImplementedError() @@ -216,7 +233,8 @@ def __exit__(self, type_, value, traceback): self.commit() except: with util.safe_reraise(): - self.rollback() + if self._rollback_can_be_called(): + self.rollback() finally: if not out_of_band_exit: subject._trans_context_manager = self._outer_trans_ctx @@ -227,7 +245,8 @@ def __exit__(self, type_, value, traceback): if not self._transaction_is_closed(): self.close() else: - self.rollback() + if self._rollback_can_be_called(): + self.rollback() finally: if not out_of_band_exit: subject._trans_context_manager = self._outer_trans_ctx diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index bb12f7021d6..034651326be 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -939,6 +939,9 @@ def _transaction_is_active(self): def _transaction_is_closed(self): return self._state is CLOSED + def _rollback_can_be_called(self): + 
return self._state not in (COMMITTED, CLOSED) + class Session(_SessionClassMethods): """Manages persistence operations for ORM-mapped objects. diff --git a/test/engine/test_transaction.py b/test/engine/test_transaction.py index 9e614202237..43b42647eb4 100644 --- a/test/engine/test_transaction.py +++ b/test/engine/test_transaction.py @@ -487,6 +487,36 @@ def test_close2(self, local_connection): result = connection.exec_driver_sql("select * from users") assert len(result.fetchall()) == 0 + @testing.requires.independent_connections + def test_no_rollback_in_deactive(self, local_connection): + """test #7388""" + + def fail(*arg, **kw): + raise BaseException("some base exception") + + with mock.patch.object(testing.db.dialect, "do_commit", fail): + with expect_raises_message(BaseException, "some base exception"): + with local_connection.begin(): + pass + + @testing.requires.independent_connections + @testing.requires.savepoints + def test_no_rollback_in_deactive_savepoint(self, local_connection): + """test #7388""" + + def fail(*arg, **kw): + raise BaseException("some base exception") + + with mock.patch.object( + testing.db.dialect, "do_release_savepoint", fail + ): + with local_connection.begin(): + with expect_raises_message( + BaseException, "some base exception" + ): + with local_connection.begin_nested(): + pass + @testing.requires.savepoints def test_nested_subtransaction_rollback(self, local_connection): connection = local_connection diff --git a/test/orm/test_transaction.py b/test/orm/test_transaction.py index f0ef37230cb..603ec079a76 100644 --- a/test/orm/test_transaction.py +++ b/test/orm/test_transaction.py @@ -513,6 +513,46 @@ def do_begin(conn, name): assert conn.closed assert not fairy.is_valid + @testing.requires.independent_connections + def test_no_rollback_in_committed_state(self): + """test #7388 + + Prior to the fix, using the session.begin() context manager + would produce the error "This session is in 'committed' state; no + further SQL can be 
emitted ", when it attempted to call .rollback() + if the connection.close() operation failed inside of session.commit(). + + While the real exception was chained inside, this still proved to + be misleading so we now skip the rollback() in this specific case + and allow the original error to be raised. + + """ + + sess = fixture_session() + + def fail(*arg, **kw): + raise BaseException("some base exception") + + with mock.patch.object( + testing.db.dialect, "do_rollback", side_effect=fail + ) as fail_mock, mock.patch.object( + testing.db.dialect, + "do_commit", + side_effect=testing.db.dialect.do_commit, + ) as succeed_mock: + + # sess.begin() -> commit(). why would do_rollback() be called? + # because of connection pool finalize_fairy *after* the commit. + # this will cause the conn.close() in session.commit() to fail, + # but after the DB commit succeeded. + with expect_raises_message(BaseException, "some base exception"): + with sess.begin(): + conn = sess.connection() + fairy_conn = conn.connection + + eq_(succeed_mock.mock_calls, [mock.call(fairy_conn)]) + eq_(fail_mock.mock_calls, [mock.call(fairy_conn)]) + def test_continue_flushing_on_commit(self): """test that post-flush actions get flushed also if we're in commit()""" From 7e6115d94a8728bba90464ed3f3e79fa16d6c766 Mon Sep 17 00:00:00 2001 From: Tom Ritchford Date: Sun, 5 Dec 2021 13:27:45 -0500 Subject: [PATCH 046/632] Add __copy__, __deepcopy__ to URL. Fixes: #7400 Added support for ``copy()`` and ``deepcopy()`` to the :class:`_url.URL` class. Pull request courtesy Tom Ritchford. 
Fixes: #7400 Closes: #7401 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/7401 Pull-request-sha: a2c1b8992f5d153c6210178cda47b8ae96b91fb5 Change-Id: I55977338b2655a7d4f733ae786d31e589185e9ca (cherry picked from commit 924cc31975f8874d369db6599575e361bdb34be9) --- doc/build/changelog/unreleased_14/7400.rst | 6 ++++ lib/sqlalchemy/engine/url.py | 16 ++++++++++ test/engine/test_parseconnect.py | 34 ++++++++++++++++++++++ 3 files changed, 56 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7400.rst diff --git a/doc/build/changelog/unreleased_14/7400.rst b/doc/build/changelog/unreleased_14/7400.rst new file mode 100644 index 00000000000..799b3b9a361 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7400.rst @@ -0,0 +1,6 @@ +.. change:: + :tags: usecase, engine + :tickets: 7400 + + Added support for ``copy()`` and ``deepcopy()`` to the :class:`_url.URL` + class. Pull request courtesy Tom Ritchford. diff --git a/lib/sqlalchemy/engine/url.py b/lib/sqlalchemy/engine/url.py index 488f7395270..320e69fbc38 100644 --- a/lib/sqlalchemy/engine/url.py +++ b/lib/sqlalchemy/engine/url.py @@ -560,6 +560,22 @@ def __str__(self): def __repr__(self): return self.render_as_string() + def __copy__(self): + return self.__class__.create( + self.drivername, + self.username, + self.password, + self.host, + self.port, + self.database, + # note this is an immutabledict of str-> str / tuple of str, + # also fully immutable. 
does not require deepcopy + self.query, + ) + + def __deepcopy__(self, memo): + return self.__copy__() + def __hash__(self): return hash(str(self)) diff --git a/test/engine/test_parseconnect.py b/test/engine/test_parseconnect.py index 67d8369b5dc..f553b1dab56 100644 --- a/test/engine/test_parseconnect.py +++ b/test/engine/test_parseconnect.py @@ -1,3 +1,5 @@ +import copy + import sqlalchemy as tsa from sqlalchemy import create_engine from sqlalchemy import engine_from_config @@ -11,9 +13,11 @@ from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import is_false +from sqlalchemy.testing import is_not from sqlalchemy.testing import is_true from sqlalchemy.testing import mock from sqlalchemy.testing.assertions import expect_deprecated @@ -193,6 +197,25 @@ def test_comparison(self): is_true(url1 != url3) is_false(url1 == url3) + def test_copy(self): + url1 = url.make_url( + "dialect://user:pass@host/db?arg1%3D=param1&arg2=param+2" + ) + url2 = copy.copy(url1) + eq_(url1, url2) + is_not(url1, url2) + + def test_deepcopy(self): + url1 = url.make_url( + "dialect://user:pass@host/db?arg1%3D=param1&arg2=param+2" + ) + url2 = copy.deepcopy(url1) + eq_(url1, url2) + is_not(url1, url2) + is_not(url1.query, url2.query) # immutabledict of immutable k/v, + # but it copies it on constructor + # in any case if params are present + @testing.combinations( "drivername", "username", @@ -239,6 +262,17 @@ def test_update_query_dict(self, starting, update_with, expected, append): url.make_url("drivername:///?%s" % expected), ) + @testing.combinations( + "drivername://", + "drivername://?foo=bar", + "drivername://?foo=bar&foo=bat", + ) + def test_query_dict_immutable(self, urlstr): + url_obj = url.make_url(urlstr) + + with expect_raises_message(TypeError, 
".*immutable"): + url_obj.query["foo"] = "hoho" + @testing.combinations( ( "foo1=bar1&foo2=bar2", From 8d5ed4c39eed6a13397f43159278138bbe1a4a1b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 3 Dec 2021 14:04:05 -0500 Subject: [PATCH 047/632] Warn when caching is disabled / document This patch adds new warnings for all elements that don't indicate their caching behavior, including user-defined ClauseElement subclasses and third party dialects. it additionally adds new documentation to discuss an apparent performance degradation in 1.4 when caching is disabled as a result in the significant expense incurred by ORM lazy loaders, which in 1.3 used BakedQuery so were actually cached. As a result of adding the warnings, a fair degree of lesser used SQL expression objects identified that they did not define caching behavior so would have been producing ``[no key]``, including PostgreSQL constructs ``hstore`` and ``array``. These have been amended to use inherit cache where appropriate. "on conflict" constructs in PostgreSQL, MySQL, SQLite still explicitly don't generate a cache key at this time. The change also adds a test for all constructs via assert_compile() to assert they will not generate cache warnings. 
Fixes: #7394 Change-Id: I85958affbb99bfad0f5efa21bc8f2a95e7e46981 (cherry picked from commit 22deafe15289d2be55682e1632016004b02b62c0) --- doc/build/changelog/unreleased_14/7394.rst | 49 ++++++ doc/build/core/connections.rst | 127 +++++++++++++-- doc/build/core/expression_api.rst | 1 + doc/build/core/foundation.rst | 32 ++++ doc/build/core/sqlelement.rst | 16 -- doc/build/core/visitors.rst | 3 +- doc/build/errors.rst | 89 +++++++++++ doc/build/faq/performance.rst | 160 +++++++++++++++++++ examples/dogpile_caching/caching_query.py | 9 ++ lib/sqlalchemy/dialects/mssql/base.py | 1 + lib/sqlalchemy/dialects/mysql/dml.py | 1 + lib/sqlalchemy/dialects/postgresql/array.py | 1 + lib/sqlalchemy/dialects/postgresql/dml.py | 1 + lib/sqlalchemy/dialects/postgresql/ext.py | 2 + lib/sqlalchemy/dialects/postgresql/hstore.py | 8 + lib/sqlalchemy/dialects/sqlite/dml.py | 1 + lib/sqlalchemy/engine/default.py | 21 ++- lib/sqlalchemy/ext/compiler.py | 102 +++++++++++- lib/sqlalchemy/orm/attributes.py | 5 + lib/sqlalchemy/orm/interfaces.py | 48 +++++- lib/sqlalchemy/orm/query.py | 2 + lib/sqlalchemy/sql/base.py | 7 +- lib/sqlalchemy/sql/coercions.py | 16 ++ lib/sqlalchemy/sql/ddl.py | 3 + lib/sqlalchemy/sql/elements.py | 7 +- lib/sqlalchemy/sql/functions.py | 4 + lib/sqlalchemy/sql/roles.py | 5 + lib/sqlalchemy/sql/traversals.py | 101 +++++++++++- lib/sqlalchemy/sql/type_api.py | 15 +- lib/sqlalchemy/testing/assertions.py | 9 ++ test/dialect/mssql/test_compiler.py | 2 +- test/engine/test_execute.py | 1 + test/ext/test_baked.py | 1 + test/ext/test_compiler.py | 30 +++- test/orm/inheritance/test_assorted_poly.py | 2 +- test/orm/test_cache_key.py | 30 ++++ test/orm/test_lambdas.py | 2 +- test/orm/test_query.py | 2 + test/sql/test_compare.py | 79 ++++++++- test/sql/test_functions.py | 20 ++- test/sql/test_labels.py | 2 + test/sql/test_lambdas.py | 25 ++- test/sql/test_operators.py | 8 + test/sql/test_resultset.py | 2 + test/sql/test_types.py | 2 + 45 files changed, 979 insertions(+), 75 
deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7394.rst create mode 100644 doc/build/core/foundation.rst diff --git a/doc/build/changelog/unreleased_14/7394.rst b/doc/build/changelog/unreleased_14/7394.rst new file mode 100644 index 00000000000..66bda3e4e4c --- /dev/null +++ b/doc/build/changelog/unreleased_14/7394.rst @@ -0,0 +1,49 @@ +.. change:: + :tags: bug, sql + :tickets: 7394 + + Custom SQL elements, third party dialects, custom or third party datatypes + will all generate consistent warnings when they do not clearly opt in or + out of SQL statement caching, which is achieved by setting the appropriate + attributes on each type of class. The warning links to documentation + sections which indicate the appropriate approach for each type of object in + order for caching to be enabled. + +.. change:: + :tags: bug, sql + :tickets: 7394 + + Fixed missing caching directives for a few lesser used classes in SQL Core + which would cause ``[no key]`` to be logged for elements which made use of + these. + +.. change:: + :tags: bug, postgresql + :tickets: 7394 + + Fixed missing caching directives for :class:`_postgresql.hstore` and + :class:`_postgresql.array` constructs which would cause ``[no key]`` + to be logged for these elements. + +.. change:: + :tags: bug, orm + :tickets: 7394 + + User defined ORM options, such as those illustrated in the dogpile.caching + example which subclass :class:`_orm.UserDefinedOption`, by definition are + handled on every statement execution and do not need to be considered as + part of the cache key for the statement. Caching of the base + :class:`.ExecutableOption` class has been modified so that it is no longer + a :class:`.HasCacheKey` subclass directly, so that the presence of user + defined option objects will not have the unwanted side effect of disabling + statement caching. Only ORM specific loader and criteria options, which are + all internal to SQLAlchemy, now participate within the caching system. + +.. 
change:: + :tags: bug, orm + :tickets: 7394 + + Fixed issue where mappings that made use of :func:`_orm.synonym` and + potentially other kinds of "proxy" attributes would not in all cases + successfully generate a cache key for their SQL statements, leading to + degraded performance for those statements. \ No newline at end of file diff --git a/doc/build/core/connections.rst b/doc/build/core/connections.rst index 28d332203b6..c0efba0f5c0 100644 --- a/doc/build/core/connections.rst +++ b/doc/build/core/connections.rst @@ -1026,6 +1026,8 @@ what the cache is doing, engine logging will include details about the cache's behavior, described in the next section. +.. _sql_caching_logging: + Estimating Cache Performance Using Logging ------------------------------------------ @@ -1293,28 +1295,35 @@ The cache can also be disabled with this argument by sending a value of Caching for Third Party Dialects --------------------------------- -The caching feature requires that the dialect's compiler produces a SQL -construct that is generically reusable given a particular cache key. This means +The caching feature requires that the dialect's compiler produces SQL +strings that are safe to reuse for many statement invocations, given +a particular cache key that is keyed to that SQL string. This means that any literal values in a statement, such as the LIMIT/OFFSET values for a SELECT, can not be hardcoded in the dialect's compilation scheme, as the compiled string will not be re-usable. SQLAlchemy supports rendered bound parameters using the :meth:`_sql.BindParameter.render_literal_execute` method which can be applied to the existing ``Select._limit_clause`` and -``Select._offset_clause`` attributes by a custom compiler. 
- -As there are many third party dialects, many of which may be generating -literal values from SQL statements without the benefit of the newer "literal execute" -feature, SQLAlchemy as of version 1.4.5 has added a flag to dialects known as -:attr:`_engine.Dialect.supports_statement_cache`. This flag is tested to be present -directly on a dialect class, and not any superclasses, so that even a third -party dialect that subclasses an existing cacheable SQLAlchemy dialect such -as ``sqlalchemy.dialects.postgresql.PGDialect`` must still specify this flag, +``Select._offset_clause`` attributes by a custom compiler, which +are illustrated later in this section. + +As there are many third party dialects, many of which may be generating literal +values from SQL statements without the benefit of the newer "literal execute" +feature, SQLAlchemy as of version 1.4.5 has added an attribute to dialects +known as :attr:`_engine.Dialect.supports_statement_cache`. This attribute is +checked at runtime for its presence directly on a particular dialect's class, +even if it's already present on a superclass, so that even a third party +dialect that subclasses an existing cacheable SQLAlchemy dialect such as +``sqlalchemy.dialects.postgresql.PGDialect`` must still explicitly include this +attribute for caching to be enabled. The attribute should **only** be enabled once the dialect has been altered as needed and tested for reusability of compiled SQL statements with differing parameters. -For all third party dialects that don't support this flag, the logging for -such a dialect will indicate ``dialect does not support caching``. Dialect -authors can apply the flag as follows:: +For all third party dialects that don't support this attribute, the logging for +such a dialect will indicate ``dialect does not support caching``. 
+ +When a dialect has been tested against caching, and in particular the SQL +compiler has been updated to not render any literal LIMIT / OFFSET within +a SQL string directly, dialect authors can apply the attribute as follows:: from sqlalchemy.engine.default import DefaultDialect @@ -1328,6 +1337,96 @@ The flag needs to be applied to all subclasses of the dialect as well:: .. versionadded:: 1.4.5 + Added the :attr:`.Dialect.supports_statement_cache` attribute. + +The typical case for dialect modification follows. + +Example: Rendering LIMIT / OFFSET with post compile parameters +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +As an example, suppose a dialect overrides the :meth:`.SQLCompiler.limit_clause` +method, which produces the "LIMIT / OFFSET" clause for a SQL statement, +like this:: + + # pre 1.4 style code + def limit_clause(self, select, **kw): + text = "" + if select._limit is not None: + text += " \n LIMIT %d" % (select._limit, ) + if select._offset is not None: + text += " \n OFFSET %d" % (select._offset, ) + return text + +The above routine renders the :attr:`.Select._limit` and +:attr:`.Select._offset` integer values as literal integers embedded in the SQL +statement. This is a common requirement for databases that do not support using +a bound parameter within the LIMIT/OFFSET clauses of a SELECT statement. +However, rendering the integer value within the initial compilation stage is +directly **incompatible** with caching as the limit and offset integer values +of a :class:`.Select` object are not part of the cache key, so that many +:class:`.Select` statements with different limit/offset values would not render +with the correct value. + +The correction for the above code is to move the literal integer into +SQLAlchemy's :ref:`post-compile ` facility, which will render the +literal integer outside of the initial compilation stage, but instead at +execution time before the statement is sent to the DBAPI. 
This is accessed +within the compilation stage using the :meth:`_sql.BindParameter.render_literal_execute` +method, in conjunction with using the :attr:`.Select._limit_clause` and +:attr:`.Select._offset_clause` attributes, which represent the LIMIT/OFFSET +as a complete SQL expression, as follows:: + + # 1.4 cache-compatible code + def limit_clause(self, select, **kw): + text = "" + + limit_clause = select._limit_clause + offset_clause = select._offset_clause + + if select._simple_int_clause(limit_clause): + text += " \n LIMIT %s" % ( + self.process(limit_clause.render_literal_execute(), **kw) + ) + elif limit_clause is not None: + # assuming the DB doesn't support SQL expressions for LIMIT. + # Otherwise render here normally + raise exc.CompileError( + "dialect 'mydialect' can only render simple integers for LIMIT" + ) + if select._simple_int_clause(offset_clause): + text += " \n OFFSET %s" % ( + self.process(offset_clause.render_literal_execute(), **kw) + ) + elif offset_clause is not None: + # assuming the DB doesn't support SQL expressions for OFFSET. + # Otherwise render here normally + raise exc.CompileError( + "dialect 'mydialect' can only render simple integers for OFFSET" + ) + + return text + +The approach above will generate a compiled SELECT statement that looks like:: + + SELECT x FROM y + LIMIT __[POSTCOMPILE_param_1] + OFFSET __[POSTCOMPILE_param_2] + +Where above, the ``__[POSTCOMPILE_param_1]`` and ``__[POSTCOMPILE_param_2]`` +indicators will be populated with their corresponding integer values at +statement execution time, after the SQL string has been retrieved from the +cache. + +After changes like the above have been made as appropriate, the +:attr:`.Dialect.supports_statement_cache` flag should be set to ``True``. +It is strongly recommended that third party dialects make use of the +`dialect third party test suite `_ +which will assert that operations like +SELECTs with LIMIT/OFFSET are correctly rendered and cached. + +.. 
seealso:: + + :ref:`faq_new_caching` - in the :ref:`faq_toplevel` section .. _engine_lambda_caching: diff --git a/doc/build/core/expression_api.rst b/doc/build/core/expression_api.rst index 7d455d20010..236e0e2ee75 100644 --- a/doc/build/core/expression_api.rst +++ b/doc/build/core/expression_api.rst @@ -12,6 +12,7 @@ see :ref:`sqlexpression_toplevel`. .. toctree:: :maxdepth: 3 + foundation sqlelement operators selectable diff --git a/doc/build/core/foundation.rst b/doc/build/core/foundation.rst new file mode 100644 index 00000000000..3a017dd5dfe --- /dev/null +++ b/doc/build/core/foundation.rst @@ -0,0 +1,32 @@ +.. _core_foundation_toplevel: + +================================================= +SQL Expression Language Foundational Constructs +================================================= + +Base classes and mixins that are used to compose SQL Expression Language +elements. + +.. currentmodule:: sqlalchemy.sql.expression + +.. autoclass:: CacheKey + :members: + +.. autoclass:: ClauseElement + :members: + :inherited-members: + + +.. autoclass:: sqlalchemy.sql.base.DialectKWArgs + :members: + + +.. autoclass:: sqlalchemy.sql.traversals.HasCacheKey + :members: + +.. autoclass:: LambdaElement + :members: + +.. autoclass:: StatementLambdaElement + :members: + diff --git a/doc/build/core/sqlelement.rst b/doc/build/core/sqlelement.rst index 8e65993624d..499f26571a8 100644 --- a/doc/build/core/sqlelement.rst +++ b/doc/build/core/sqlelement.rst @@ -120,20 +120,12 @@ The classes here are generated using the constructors listed at .. autoclass:: BindParameter :members: -.. autoclass:: CacheKey - :members: - .. autoclass:: Case :members: .. autoclass:: Cast :members: -.. autoclass:: ClauseElement - :members: - :inherited-members: - - .. autoclass:: ClauseList :members: @@ -155,8 +147,6 @@ The classes here are generated using the constructors listed at :special-members: :inherited-members: -.. autoclass:: sqlalchemy.sql.base.DialectKWArgs - :members: .. 
autoclass:: Extract :members: @@ -170,9 +160,6 @@ The classes here are generated using the constructors listed at .. autoclass:: Label :members: -.. autoclass:: LambdaElement - :members: - .. autoclass:: Null :members: @@ -183,9 +170,6 @@ The classes here are generated using the constructors listed at .. autoclass:: Over :members: -.. autoclass:: StatementLambdaElement - :members: - .. autoclass:: TextClause :members: diff --git a/doc/build/core/visitors.rst b/doc/build/core/visitors.rst index 6ef466265d4..06d839d54cb 100644 --- a/doc/build/core/visitors.rst +++ b/doc/build/core/visitors.rst @@ -23,4 +23,5 @@ as well as when building out custom SQL expressions using the .. automodule:: sqlalchemy.sql.visitors :members: - :private-members: \ No newline at end of file + :private-members: + diff --git a/doc/build/errors.rst b/doc/build/errors.rst index 2b163ec2692..376bfaf4344 100644 --- a/doc/build/errors.rst +++ b/doc/build/errors.rst @@ -172,6 +172,95 @@ In SQLAlchemy 1.4, this :term:`2.0 style` behavior is enabled when the :paramref:`_orm.Session.future` flag is set on :class:`_orm.sessionmaker` or :class:`_orm.Session`. +.. _error_cprf: +.. _caching_caveats: + +Object will not produce a cache key, Performance Implications +-------------------------------------------------------------- + +SQLAlchemy as of version 1.4 includes a +:ref:`SQL compilation caching facility ` which will allow +Core and ORM SQL constructs to cache their stringified form, along with other +structural information used to fetch results from the statement, allowing the +relatively expensive string compilation process to be skipped when another +structurally equivalent construct is next used. 
This system +relies upon functionality that is implemented for all SQL constructs, including +objects such as :class:`_schema.Column`, +:func:`_sql.select`, and :class:`_types.TypeEngine` objects, to produce a +**cache key** which fully represents their state to the degree that it affects +the SQL compilation process. + +If the warnings in question refer to widely used objects such as +:class:`_schema.Column` objects, and are shown to be affecting the majority of +SQL constructs being emitted (using the estimation techniques described at +:ref:`sql_caching_logging`) such that caching is generally not enabled for an +application, this will negatively impact performance and can in some cases +effectively produce a **performance degradation** compared to prior SQLAlchemy +versions. The FAQ at :ref:`faq_new_caching` covers this in additional detail. + +Caching disables itself if there's any doubt +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Caching relies on being able to generate a cache key that accurately represents +the **complete structure** of a statement in a **consistent** fashion. If a particular +SQL construct (or type) does not have the appropriate directives in place which +allow it to generate a proper cache key, then caching cannot be safely enabled: + +* The cache key must represent the **complete structure**: If the usage of two + separate instances of that construct may result in different SQL being + rendered, caching the SQL against the first instance of the element using a + cache key that does not capture the distinct differences between the first and + second elements will result in incorrect SQL being cached and rendered for the + second instance. 
+ +* The cache key must be **consistent**: If a construct represents state that + changes every time, such as a literal value, producing unique SQL for every + instance of it, this construct is also not safe to cache, as repeated use of + the construct will quickly fill up the statement cache with unique SQL strings + that will likely not be used again, defeating the purpose of the cache. + +For the above two reasons, SQLAlchemy's caching system is **extremely +conservative** about deciding to cache the SQL corresponding to an object. + +Assertion attributes for caching +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The warning is emitted based on the criteria below. For further detail on +each, see the section :ref:`faq_new_caching`. + +* The :class:`.Dialect` itself (i.e. the module that is specified by the + first part of the URL we pass to :func:`_sa.create_engine`, like + ``postgresql+psycopg2://``), must indicate it has been reviewed and tested + to support caching correctly, which is indicated by the + :attr:`.Dialect.supports_statement_cache` attribute being set to ``True``. + When using third party dialects, consult with the maintainers of the dialect + so that they may follow the :ref:`steps to ensure caching may be enabled + ` in their dialect and publish a new release. + +* Third party or user defined types that inherit from either + :class:`.TypeDecorator` or :class:`.UserDefinedType` must include the + :attr:`.ExternalType.cache_ok` attribute in their definition, including for + all derived subclasses, following the guidelines described in the docstring + for :attr:`.ExternalType.cache_ok`. As before, if these datatypes are + imported from third party libraries, consult with the maintainers of that + library so that they may provide the necessary changes to their library and + publish a new release. 
+ +* Third party or user defined SQL constructs that subclass from classes such + as :class:`.ClauseElement`, :class:`_schema.Column`, :class:`_dml.Insert` + etc, including simple subclasses as well as those which are designed to + work with the :ref:`sqlalchemy.ext.compiler_toplevel`, should normally + include the :attr:`.HasCacheKey.inherit_cache` attribute set to ``True`` + or ``False`` based on the design of the construct, following the guidelines + described at :ref:`compilerext_caching`. + +.. seealso:: + + :ref:`sql_caching_logging` - background on observing cache behavior + and efficiency + + :ref:`faq_new_caching` - in the :ref:`faq_toplevel` section + .. _error_s9r1: Object is being merged into a Session along the backref cascade diff --git a/doc/build/faq/performance.rst b/doc/build/faq/performance.rst index 6e144072131..781d6c79d34 100644 --- a/doc/build/faq/performance.rst +++ b/doc/build/faq/performance.rst @@ -8,6 +8,166 @@ Performance :class: faq :backlinks: none +.. _faq_new_caching: + +Why is my application slow after upgrading to 1.4 and/or 2.x? +-------------------------------------------------------------- + +SQLAlchemy as of version 1.4 includes a +:ref:`SQL compilation caching facility ` which will allow +Core and ORM SQL constructs to cache their stringified form, along with other +structural information used to fetch results from the statement, allowing the +relatively expensive string compilation process to be skipped when another +structurally equivalent construct is next used. This system +relies upon functionality that is implemented for all SQL constructs, including +objects such as :class:`_schema.Column`, +:func:`_sql.select`, and :class:`_types.TypeEngine` objects, to produce a +**cache key** which fully represents their state to the degree that it affects +the SQL compilation process. 
+ +The caching system allows SQLAlchemy 1.4 and above to be more performant than +SQLAlchemy 1.3 with regards to the time spent converting SQL constructs into +strings repeatedly. However, this only works if caching is enabled for the +dialect and SQL constructs in use; if not, string compilation is usually +similar to that of SQLAlchemy 1.3, with a slight decrease in speed in some +cases. + +There is one case however where if SQLAlchemy's new caching system has been +disabled (for reasons below), performance for the ORM may be in fact +significantly poorer than that of 1.3 or other prior releases which is due to +the lack of caching within ORM lazy loaders and object refresh queries, which +in the 1.3 and earlier releases used the now-legacy ``BakedQuery`` system. If +an application is seeing significant (30% or higher) degradations in +performance (measured in time for operations to complete) when switching to +1.4, this is the likely cause of the issue, with steps to mitigate below. + +.. seealso:: + + :ref:`sql_caching` - overview of the caching system + + :ref:`caching_caveats` - additional information regarding the warnings + generated for elements that don't enable caching. + +Step one - turn on SQL logging and confirm whether or not caching is working +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Here, we want to use the technique described at +:ref:`engine logging `, looking for statements with the +``[no key]`` indicator or even ``[dialect does not support caching]``. +The indicators we would see for SQL statements that are successfully participating +in the caching system would be indicating ``[generated in Xs]`` when +statements are invoked for the first time and then +``[cached since Xs ago]`` for the vast majority of statements subsequent. 
+If ``[no key]`` is prevalent in particular for SELECT statements, or +if caching is disabled entirely due to ``[dialect does not support caching]``, +this can be the cause of significant performance degradation. + +.. seealso:: + + :ref:`sql_caching_logging` + + +Step two - identify what constructs are blocking caching from being enabled +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Assuming statements are not being cached, there should be warnings emitted +early in the application's log (SQLAlchemy 1.4.28 and above only) indicating +dialects, :class:`.TypeEngine` objects, and SQL constructs that are not +participating in caching. + +For user defined datatypes such as those which extend :class:`_types.TypeDecorator` +and :class:`_types.UserDefinedType`, the warnings will look like:: + + sqlalchemy.exc.SAWarning: MyType will not produce a cache key because the + ``cache_ok`` attribute is not set to True. This can have significant + performance implications including some performance degradations in + comparison to prior SQLAlchemy versions. Set this attribute to True if this + type object's state is safe to use in a cache key, or False to disable this + warning. + +For custom and third party SQL elements, such as those constructed using +the techniques described at :ref:`sqlalchemy.ext.compiler_toplevel`, these +warnings will look like:: + + sqlalchemy.exc.SAWarning: Class MyClass will not make use of SQL + compilation caching as it does not set the 'inherit_cache' attribute to + ``True``. This can have significant performance implications including some + performance degradations in comparison to prior SQLAlchemy versions. Set + this attribute to True if this object can make use of the cache key + generated by the superclass. Alternatively, this attribute may be set to + False which will disable this warning. 
+ +For custom and third party dialects which make use of the :class:`.Dialect` +class hierarchy, the warnings will look like:: + + sqlalchemy.exc.SAWarning: Dialect database:driver will not make use of SQL + compilation caching as it does not set the 'supports_statement_cache' + attribute to ``True``. This can have significant performance implications + including some performance degradations in comparison to prior SQLAlchemy + versions. Dialect maintainers should seek to set this attribute to True + after appropriate development and testing for SQLAlchemy 1.4 caching + support. Alternatively, this attribute may be set to False which will + disable this warning. + + +Step three - enable caching for the given objects and/or seek alternatives +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Steps to mitigate the lack of caching include: + +* Review and set :attr:`.ExternalType.cache_ok` to ``True`` for all custom types + which extend from :class:`_types.TypeDecorator`, + :class:`_types.UserDefinedType`, as well as subclasses of these such as + :class:`_types.PickleType`. Set this **only** if the custom type does not + include any additional state attributes which affect how it renders SQL:: + + class MyCustomType(TypeDecorator): + cache_ok = True + impl = String + + If the types in use are from a third-party library, consult with the + maintainers of that library so that it may be adjusted and released. + + .. seealso:: + + :attr:`.ExternalType.cache_ok` - background on requirements to enable + caching for custom datatypes. + +* Make sure third party dialects set :attr:`.Dialect.supports_statement_cache` + to ``True``. What this indicates is that the maintainers of a third party + dialect have made sure their dialect works with SQLAlchemy 1.4 or greater, + and that their dialect doesn't include any compilation features which may get + in the way of caching. 
As there are some common compilation patterns which + can in fact interfere with caching, it's important that dialect maintainers + check and test this carefully, adjusting for any of the legacy patterns + which won't work with caching. + + .. seealso:: + + :ref:`engine_thirdparty_caching` - background and examples for third-party + dialects to participate in SQL statement caching. + +* Custom SQL classes, including all DQL / DML constructs one might create + using the :ref:`sqlalchemy.ext.compiler_toplevel`, as well as ad-hoc + subclasses of objects such as :class:`_schema.Column` or + :class:`_schema.Table`. The :attr:`.HasCacheKey.inherit_cache` attribute + may be set to ``True`` for trivial subclasses, which do not contain any + subclass-specific state information which affects the SQL compilation. + + .. seealso:: + + :ref:`compilerext_caching` - guidelines for applying the + :attr:`.HasCacheKey.inherit_cache` attribute. + + +.. seealso:: + + :ref:`sql_caching` - caching system overview + + :ref:`caching_caveats` - background on warnings emitted when caching + is not enabled for specific constructs and/or dialects. + + .. _faq_how_to_profile: How can I profile a SQLAlchemy powered application? diff --git a/examples/dogpile_caching/caching_query.py b/examples/dogpile_caching/caching_query.py index 68f72e5f06e..f5065f8df54 100644 --- a/examples/dogpile_caching/caching_query.py +++ b/examples/dogpile_caching/caching_query.py @@ -130,10 +130,19 @@ def __init__( self.expiration_time = expiration_time self.ignore_expiration = ignore_expiration + # this is not needed as of SQLAlchemy 1.4.28; + # UserDefinedOption classes no longer participate in the SQL + # compilation cache key def _gen_cache_key(self, anon_map, bindparams): return None def _generate_cache_key(self, statement, parameters, orm_cache): + """generate a cache key with which to key the results of a statement. 
+ + This leverages the use of the SQL compilation cache key which is + repurposed as a SQL results key. + + """ statement_cache_key = statement._generate_cache_key() key = statement_cache_key.to_offline_string( diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index 8d2bc36ee05..d6a35c93768 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -1354,6 +1354,7 @@ class TryCast(sql.elements.Cast): __visit_name__ = "try_cast" stringify_dialect = "mssql" + inherit_cache = True def __init__(self, *arg, **kw): """Create a TRY_CAST expression. diff --git a/lib/sqlalchemy/dialects/mysql/dml.py b/lib/sqlalchemy/dialects/mysql/dml.py index e2f78783c22..790733cbfda 100644 --- a/lib/sqlalchemy/dialects/mysql/dml.py +++ b/lib/sqlalchemy/dialects/mysql/dml.py @@ -25,6 +25,7 @@ class Insert(StandardInsert): """ stringify_dialect = "mysql" + inherit_cache = False @property def inserted(self): diff --git a/lib/sqlalchemy/dialects/postgresql/array.py b/lib/sqlalchemy/dialects/postgresql/array.py index 0cb574dacf7..e57a4fc9acc 100644 --- a/lib/sqlalchemy/dialects/postgresql/array.py +++ b/lib/sqlalchemy/dialects/postgresql/array.py @@ -87,6 +87,7 @@ class array(expression.ClauseList, expression.ColumnElement): __visit_name__ = "array" stringify_dialect = "postgresql" + inherit_cache = True def __init__(self, clauses, **kw): clauses = [ diff --git a/lib/sqlalchemy/dialects/postgresql/dml.py b/lib/sqlalchemy/dialects/postgresql/dml.py index bb6345cf438..4104fe51f78 100644 --- a/lib/sqlalchemy/dialects/postgresql/dml.py +++ b/lib/sqlalchemy/dialects/postgresql/dml.py @@ -35,6 +35,7 @@ class Insert(StandardInsert): """ stringify_dialect = "postgresql" + inherit_cache = False @util.memoized_property def excluded(self): diff --git a/lib/sqlalchemy/dialects/postgresql/ext.py b/lib/sqlalchemy/dialects/postgresql/ext.py index f9e4c1d6cb9..8c3a539be04 100644 --- a/lib/sqlalchemy/dialects/postgresql/ext.py +++ 
b/lib/sqlalchemy/dialects/postgresql/ext.py @@ -54,6 +54,7 @@ class aggregate_order_by(expression.ColumnElement): __visit_name__ = "aggregate_order_by" stringify_dialect = "postgresql" + inherit_cache = False def __init__(self, target, *order_by): self.target = coercions.expect(roles.ExpressionElementRole, target) @@ -99,6 +100,7 @@ class ExcludeConstraint(ColumnCollectionConstraint): __visit_name__ = "exclude_constraint" where = None + inherit_cache = False create_drop_stringify_dialect = "postgresql" diff --git a/lib/sqlalchemy/dialects/postgresql/hstore.py b/lib/sqlalchemy/dialects/postgresql/hstore.py index a4090f1ac59..7f42c3ab4e0 100644 --- a/lib/sqlalchemy/dialects/postgresql/hstore.py +++ b/lib/sqlalchemy/dialects/postgresql/hstore.py @@ -296,41 +296,49 @@ class hstore(sqlfunc.GenericFunction): type = HSTORE name = "hstore" + inherit_cache = True class _HStoreDefinedFunction(sqlfunc.GenericFunction): type = sqltypes.Boolean name = "defined" + inherit_cache = True class _HStoreDeleteFunction(sqlfunc.GenericFunction): type = HSTORE name = "delete" + inherit_cache = True class _HStoreSliceFunction(sqlfunc.GenericFunction): type = HSTORE name = "slice" + inherit_cache = True class _HStoreKeysFunction(sqlfunc.GenericFunction): type = ARRAY(sqltypes.Text) name = "akeys" + inherit_cache = True class _HStoreValsFunction(sqlfunc.GenericFunction): type = ARRAY(sqltypes.Text) name = "avals" + inherit_cache = True class _HStoreArrayFunction(sqlfunc.GenericFunction): type = ARRAY(sqltypes.Text) name = "hstore_to_array" + inherit_cache = True class _HStoreMatrixFunction(sqlfunc.GenericFunction): type = ARRAY(sqltypes.Text) name = "hstore_to_matrix" + inherit_cache = True # diff --git a/lib/sqlalchemy/dialects/sqlite/dml.py b/lib/sqlalchemy/dialects/sqlite/dml.py index a93e31beba2..e4d8bd9434d 100644 --- a/lib/sqlalchemy/dialects/sqlite/dml.py +++ b/lib/sqlalchemy/dialects/sqlite/dml.py @@ -36,6 +36,7 @@ class Insert(StandardInsert): """ stringify_dialect = "sqlite" + 
inherit_cache = False @util.memoized_property def excluded(self): diff --git a/lib/sqlalchemy/engine/default.py b/lib/sqlalchemy/engine/default.py index 1adb8861745..0dac6600ef3 100644 --- a/lib/sqlalchemy/engine/default.py +++ b/lib/sqlalchemy/engine/default.py @@ -350,10 +350,23 @@ def _ensure_has_table_connection(self, arg): @util.memoized_property def _supports_statement_cache(self): - return ( - self.__class__.__dict__.get("supports_statement_cache", False) - is True - ) + ssc = self.__class__.__dict__.get("supports_statement_cache", None) + if ssc is None: + util.warn( + "Dialect %s:%s will not make use of SQL compilation caching " + "as it does not set the 'supports_statement_cache' attribute " + "to ``True``. This can have " + "significant performance implications including some " + "performance degradations in comparison to prior SQLAlchemy " + "versions. Dialect maintainers should seek to set this " + "attribute to True after appropriate development and testing " + "for SQLAlchemy 1.4 caching support. Alternatively, this " + "attribute may be set to False which will disable this " + "warning." % (self.name, self.driver), + code="cprf", + ) + + return bool(ssc) @util.memoized_property def _type_memos(self): diff --git a/lib/sqlalchemy/ext/compiler.py b/lib/sqlalchemy/ext/compiler.py index c7eb7cc323b..3470407158a 100644 --- a/lib/sqlalchemy/ext/compiler.py +++ b/lib/sqlalchemy/ext/compiler.py @@ -18,7 +18,7 @@ from sqlalchemy.sql.expression import ColumnClause class MyColumn(ColumnClause): - pass + inherit_cache = True @compiles(MyColumn) def compile_mycolumn(element, compiler, **kw): @@ -47,6 +47,7 @@ def compile_mycolumn(element, compiler, **kw): from sqlalchemy.schema import DDLElement class AlterColumn(DDLElement): + inherit_cache = False def __init__(self, column, cmd): self.column = column @@ -64,6 +65,8 @@ def visit_alter_column(element, compiler, **kw): The second ``visit_alter_table`` will be invoked when any ``postgresql`` dialect is used. +.. 
_compilerext_compiling_subelements: + Compiling sub-elements of a custom expression construct ======================================================= @@ -78,6 +81,8 @@ def visit_alter_column(element, compiler, **kw): from sqlalchemy.sql.expression import Executable, ClauseElement class InsertFromSelect(Executable, ClauseElement): + inherit_cache = False + def __init__(self, table, select): self.table = table self.select = select @@ -252,6 +257,7 @@ def compile_varchar(element, compiler, **kw): class timestamp(ColumnElement): type = TIMESTAMP() + inherit_cache = True * :class:`~sqlalchemy.sql.functions.FunctionElement` - This is a hybrid of a ``ColumnElement`` and a "from clause" like object, and represents a SQL @@ -264,6 +270,7 @@ class timestamp(ColumnElement): class coalesce(FunctionElement): name = 'coalesce' + inherit_cache = True @compiles(coalesce) def compile(element, compiler, **kw): @@ -287,6 +294,95 @@ def compile(element, compiler, **kw): SQL statement that can be passed directly to an ``execute()`` method. It is already implicit within ``DDLElement`` and ``FunctionElement``. +Most of the above constructs also respond to SQL statement caching. A +subclassed construct will want to define the caching behavior for the object, +which usually means setting the flag ``inherit_cache`` to the value of +``False`` or ``True``. See the next section :ref:`compilerext_caching` +for background. + + +.. _compilerext_caching: + +Enabling Caching Support for Custom Constructs +============================================== + +SQLAlchemy as of version 1.4 includes a +:ref:`SQL compilation caching facility ` which will allow +equivalent SQL constructs to cache their stringified form, along with other +structural information used to fetch results from the statement. + +For reasons discussed at :ref:`caching_caveats`, the implementation of this +caching system takes a conservative approach towards including custom SQL +constructs and/or subclasses within the caching system. 
This includes that +any user-defined SQL constructs, including all the examples for this +extension, will not participate in caching by default unless they positively +assert that they are able to do so. The :attr:`.HasCacheKey.inherit_cache` +attribute when set to ``True`` at the class level of a specific subclass +will indicate that instances of this class may be safely cached, using the +cache key generation scheme of the immediate superclass. This applies +for example to the "synopsis" example indicated previously:: + + class MyColumn(ColumnClause): + inherit_cache = True + + @compiles(MyColumn) + def compile_mycolumn(element, compiler, **kw): + return "[%s]" % element.name + +Above, the ``MyColumn`` class does not include any new state that +affects its SQL compilation; the cache key of ``MyColumn`` instances will +make use of that of the ``ColumnClause`` superclass, meaning it will take +into account the class of the object (``MyColumn``), the string name and +datatype of the object:: + + >>> MyColumn("some_name", String())._generate_cache_key() + CacheKey( + key=('0', , + 'name', 'some_name', + 'type', (, + ('length', None), ('collation', None)) + ), bindparams=[]) + +For objects that are likely to be **used liberally as components within many +larger statements**, such as :class:`_schema.Column` subclasses and custom SQL +datatypes, it's important that **caching be enabled as much as possible**, as +this may otherwise negatively affect performance. + +An example of an object that **does** contain state which affects its SQL +compilation is the one illustrated at :ref:`compilerext_compiling_subelements`; +this is an "INSERT FROM SELECT" construct that combines together a +:class:`_schema.Table` as well as a :class:`_sql.Select` construct, each of +which independently affect the SQL string generation of the construct. 
For +this class, the example illustrates that it simply does not participate in +caching:: + + class InsertFromSelect(Executable, ClauseElement): + inherit_cache = False + + def __init__(self, table, select): + self.table = table + self.select = select + + @compiles(InsertFromSelect) + def visit_insert_from_select(element, compiler, **kw): + return "INSERT INTO %s (%s)" % ( + compiler.process(element.table, asfrom=True, **kw), + compiler.process(element.select, **kw) + ) + +While it is also possible that the above ``InsertFromSelect`` could be made to +produce a cache key that is composed of that of the :class:`_schema.Table` and +:class:`_sql.Select` components together, the API for this is not at the moment +fully public. However, for an "INSERT FROM SELECT" construct, which is only +used by itself for specific operations, caching is not as critical as in the +previous example. + +For objects that are **used in relative isolation and are generally +standalone**, such as custom :term:`DML` constructs like an "INSERT FROM +SELECT", **caching is generally less critical** as the lack of caching for such +a construct will have only localized implications for that specific operation. 
+ + Further Examples ================ @@ -309,6 +405,7 @@ def compile(element, compiler, **kw): class utcnow(expression.FunctionElement): type = DateTime() + inherit_cache = True @compiles(utcnow, 'postgresql') def pg_utcnow(element, compiler, **kw): @@ -345,6 +442,7 @@ def ms_utcnow(element, compiler, **kw): class greatest(expression.FunctionElement): type = Numeric() name = 'greatest' + inherit_cache = True @compiles(greatest) def default_greatest(element, compiler, **kw): @@ -376,7 +474,7 @@ def case_greatest(element, compiler, **kw): from sqlalchemy.ext.compiler import compiles class sql_false(expression.ColumnElement): - pass + inherit_cache = True @compiles(sql_false) def default_false(element, compiler, **kw): diff --git a/lib/sqlalchemy/orm/attributes.py b/lib/sqlalchemy/orm/attributes.py index 513144b8728..9d1e27d977e 100644 --- a/lib/sqlalchemy/orm/attributes.py +++ b/lib/sqlalchemy/orm/attributes.py @@ -526,6 +526,11 @@ def __init__( _is_internal_proxy = True + _cache_key_traversal = [ + ("key", visitors.ExtendedInternalTraversal.dp_string), + ("_parententity", visitors.ExtendedInternalTraversal.dp_multi), + ] + @property def _impl_uses_objects(self): return ( diff --git a/lib/sqlalchemy/orm/interfaces.py b/lib/sqlalchemy/orm/interfaces.py index 9eb362c437b..6182588dce6 100644 --- a/lib/sqlalchemy/orm/interfaces.py +++ b/lib/sqlalchemy/orm/interfaces.py @@ -755,15 +755,54 @@ class ORMOption(ExecutableOption): _is_strategy_option = False -class LoaderOption(ORMOption): - """Describe a loader modification to an ORM statement at compilation time. +class CompileStateOption(HasCacheKey, ORMOption): + """base for :class:`.ORMOption` classes that affect the compilation of + a SQL query and therefore need to be part of the cache key. + + .. 
note:: :class:`.CompileStateOption` is generally non-public and + should not be used as a base class for user-defined options; instead, + use :class:`.UserDefinedOption`, which is easier to use as it does not + interact with ORM compilation internals or caching. + + :class:`.CompileStateOption` defines an internal attribute + ``_is_compile_state=True`` which has the effect of the ORM compilation + routines for SELECT and other statements will call upon these options when + a SQL string is being compiled. As such, these classes implement + :class:`.HasCacheKey` and need to provide robust ``_cache_key_traversal`` + structures. + + The :class:`.CompileStateOption` class is used to implement the ORM + :class:`.LoaderOption` and :class:`.CriteriaOption` classes. + + .. versionadded:: 1.4.28 - .. versionadded:: 1.4 """ _is_compile_state = True + def process_compile_state(self, compile_state): + """Apply a modification to a given :class:`.CompileState`.""" + + def process_compile_state_replaced_entities( + self, compile_state, mapper_entities + ): + """Apply a modification to a given :class:`.CompileState`, + given entities that were replaced by with_only_columns() or + with_entities(). + + .. versionadded:: 1.4.19 + + """ + + +class LoaderOption(CompileStateOption): + """Describe a loader modification to an ORM statement at compilation time. + + .. versionadded:: 1.4 + + """ + def process_compile_state_replaced_entities( self, compile_state, mapper_entities ): @@ -780,7 +819,7 @@ def process_compile_state(self, compile_state): """Apply a modification to a given :class:`.CompileState`.""" -class CriteriaOption(ORMOption): +class CriteriaOption(CompileStateOption): """Describe a WHERE criteria modification to an ORM statement at compilation time. 
@@ -788,7 +827,6 @@ class CriteriaOption(ORMOption): """ - _is_compile_state = True _is_criteria_option = True def process_compile_state(self, compile_state): diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index bd897211cad..b412caa12f6 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -3425,6 +3425,8 @@ def __init__(self, alias): """ + inherit_cache = False + def process_compile_state(self, compile_state): pass diff --git a/lib/sqlalchemy/sql/base.py b/lib/sqlalchemy/sql/base.py index aba80222a6b..e04d3b75f0c 100644 --- a/lib/sqlalchemy/sql/base.py +++ b/lib/sqlalchemy/sql/base.py @@ -768,11 +768,13 @@ def _generate_cache_key(self): return HasCacheKey._generate_cache_key_for_object(self) -class ExecutableOption(HasCopyInternals, HasCacheKey): +class ExecutableOption(HasCopyInternals): _annotations = util.EMPTY_DICT __visit_name__ = "executable_option" + _is_has_cache_key = False + def _clone(self, **kw): """Create a shallow copy of this ExecutableOption.""" c = self.__class__.__new__(self.__class__) @@ -846,7 +848,8 @@ def options(self, *options): """ self._with_options += tuple( - coercions.expect(roles.HasCacheKeyRole, opt) for opt in options + coercions.expect(roles.ExecutableOptionRole, opt) + for opt in options ) @_generative diff --git a/lib/sqlalchemy/sql/coercions.py b/lib/sqlalchemy/sql/coercions.py index f888bad4cac..e378d9345f2 100644 --- a/lib/sqlalchemy/sql/coercions.py +++ b/lib/sqlalchemy/sql/coercions.py @@ -11,6 +11,7 @@ from . import operators from . import roles from . 
import visitors +from .base import ExecutableOption from .base import Options from .traversals import HasCacheKey from .visitors import Visitable @@ -458,6 +459,21 @@ def _literal_coercion(self, element, **kw): return element +class ExecutableOptionImpl(RoleImpl): + __slots__ = () + + def _implicit_coercions( + self, original_element, resolved, argname=None, **kw + ): + if isinstance(original_element, ExecutableOption): + return original_element + else: + self._raise_for_expected(original_element, argname, resolved) + + def _literal_coercion(self, element, **kw): + return element + + class ExpressionElementImpl(_ColumnCoercions, RoleImpl): __slots__ = () diff --git a/lib/sqlalchemy/sql/ddl.py b/lib/sqlalchemy/sql/ddl.py index f8985548ee0..b79fee17931 100644 --- a/lib/sqlalchemy/sql/ddl.py +++ b/lib/sqlalchemy/sql/ddl.py @@ -22,6 +22,9 @@ class _DDLCompiles(ClauseElement): + _hierarchy_supports_caching = False + """disable cache warnings for all _DDLCompiles subclasses. """ + def _compiler(self, dialect, **kw): """Return a compiler appropriate for this ClauseElement, given a Dialect.""" diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index a276e2cae1e..08eb37f2ce7 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -3654,6 +3654,8 @@ class CollectionAggregate(UnaryExpression): """ + inherit_cache = True + @classmethod def _create_any(cls, expr): """Produce an ANY expression. 
@@ -3961,7 +3963,7 @@ class IndexExpression(BinaryExpression): """Represent the class of expressions that are like an "index" operation.""" - pass + inherit_cache = True class GroupedElement(ClauseElement): @@ -5054,14 +5056,17 @@ def __init__(self, ident): class SavepointClause(_IdentifiedClause): __visit_name__ = "savepoint" + inherit_cache = False class RollbackToSavepointClause(_IdentifiedClause): __visit_name__ = "rollback_to_savepoint" + inherit_cache = False class ReleaseSavepointClause(_IdentifiedClause): __visit_name__ = "release_savepoint" + inherit_cache = False class quoted_name(util.MemoizedSlots, util.text_type): diff --git a/lib/sqlalchemy/sql/functions.py b/lib/sqlalchemy/sql/functions.py index 5729f81f512..4f3cf65b471 100644 --- a/lib/sqlalchemy/sql/functions.py +++ b/lib/sqlalchemy/sql/functions.py @@ -977,6 +977,7 @@ class that is instantiated automatically when called class as_utc(GenericFunction): type = DateTime + inherit_cache = True print(select(func.as_utc())) @@ -991,6 +992,7 @@ class as_utc(GenericFunction): class as_utc(GenericFunction): type = DateTime package = "time" + inherit_cache = True The above function would be available from :data:`.func` using the package name ``time``:: @@ -1008,6 +1010,7 @@ class GeoBuffer(GenericFunction): package = "geo" name = "ST_Buffer" identifier = "buffer" + inherit_cache = True The above function will render as follows:: @@ -1026,6 +1029,7 @@ class GeoBuffer(GenericFunction): package = "geo" name = quoted_name("ST_Buffer", True) identifier = "buffer" + inherit_cache = True The above function will render as:: diff --git a/lib/sqlalchemy/sql/roles.py b/lib/sqlalchemy/sql/roles.py index 70ad4cefa7f..6f25ec97570 100644 --- a/lib/sqlalchemy/sql/roles.py +++ b/lib/sqlalchemy/sql/roles.py @@ -36,6 +36,11 @@ class HasCacheKeyRole(SQLRole): _role_name = "Cacheable Core or ORM object" +class ExecutableOptionRole(SQLRole): + __slots__ = () + _role_name = "ExecutionOption Core or ORM object" + + class 
LiteralValueRole(SQLRole): _role_name = "Literal Python value" diff --git a/lib/sqlalchemy/sql/traversals.py b/lib/sqlalchemy/sql/traversals.py index 3d377271f99..27e65652654 100644 --- a/lib/sqlalchemy/sql/traversals.py +++ b/lib/sqlalchemy/sql/traversals.py @@ -49,7 +49,50 @@ def _preconfigure_traversals(target_hierarchy): class HasCacheKey(object): + """Mixin for objects which can produce a cache key. + + .. seealso:: + + :class:`.CacheKey` + + :ref:`sql_caching` + + """ + _cache_key_traversal = NO_CACHE + + _is_has_cache_key = True + + _hierarchy_supports_caching = True + """private attribute which may be set to False to prevent the + inherit_cache warning from being emitted for a hierarchy of subclasses. + + Currently applies to the DDLElement hierarchy which does not implement + caching. + + """ + + inherit_cache = None + """Indicate if this :class:`.HasCacheKey` instance should make use of the + cache key generation scheme used by its immediate superclass. + + The attribute defaults to ``None``, which indicates that a construct has + not yet taken into account whether or not its appropriate for it to + participate in caching; this is functionally equivalent to setting the + value to ``False``, except that a warning is also emitted. + + This flag can be set to ``True`` on a particular class, if the SQL that + corresponds to the object does not change based on attributes which + are local to this class, and not its superclass. + + .. seealso:: + + :ref:`compilerext_caching` - General guideslines for setting the + :attr:`.HasCacheKey.inherit_cache` attribute for third-party or user + defined SQL constructs. + + """ + __slots__ = () @classmethod @@ -60,7 +103,8 @@ def _generate_cache_attrs(cls): so should only be called once per class. 
""" - inherit = cls.__dict__.get("inherit_cache", False) + inherit_cache = cls.__dict__.get("inherit_cache", None) + inherit = bool(inherit_cache) if inherit: _cache_key_traversal = getattr(cls, "_cache_key_traversal", None) @@ -89,6 +133,23 @@ def _generate_cache_attrs(cls): ) if _cache_key_traversal is None: cls._generated_cache_key_traversal = NO_CACHE + if ( + inherit_cache is None + and cls._hierarchy_supports_caching + ): + util.warn( + "Class %s will not make use of SQL compilation " + "caching as it does not set the 'inherit_cache' " + "attribute to ``True``. This can have " + "significant performance implications including " + "some performance degradations in comparison to " + "prior SQLAlchemy versions. Set this attribute " + "to True if this object can make use of the cache " + "key generated by the superclass. Alternatively, " + "this attribute may be set to False which will " + "disable this warning." % (cls.__name__), + code="cprf", + ) return NO_CACHE return _cache_key_traversal_visitor.generate_dispatch( @@ -273,6 +334,15 @@ def _generate_cache_key(self): class CacheKey(namedtuple("CacheKey", ["key", "bindparams"])): + """The key used to identify a SQL statement construct in the + SQL compilation cache. + + .. 
seealso:: + + :ref:`sql_caching` + + """ + def __hash__(self): """CacheKey itself is not hashable - hash the .key portion""" @@ -480,7 +550,19 @@ def visit_has_cache_key_list( tuple(elem._gen_cache_key(anon_map, bindparams) for elem in obj), ) - visit_executable_options = visit_has_cache_key_list + def visit_executable_options( + self, attrname, obj, parent, anon_map, bindparams + ): + if not obj: + return () + return ( + attrname, + tuple( + elem._gen_cache_key(anon_map, bindparams) + for elem in obj + if elem._is_has_cache_key + ), + ) def visit_inspectable_list( self, attrname, obj, parent, anon_map, bindparams @@ -1102,7 +1184,20 @@ def visit_has_cache_key_list( ): return COMPARE_FAILED - visit_executable_options = visit_has_cache_key_list + def visit_executable_options( + self, attrname, left_parent, left, right_parent, right, **kw + ): + for l, r in util.zip_longest(left, right, fillvalue=None): + if ( + l._gen_cache_key(self.anon_map[0], []) + if l._is_has_cache_key + else l + ) != ( + r._gen_cache_key(self.anon_map[1], []) + if r._is_has_cache_key + else r + ): + return COMPARE_FAILED def visit_clauseelement( self, attrname, left_parent, left, right_parent, right, **kw diff --git a/lib/sqlalchemy/sql/type_api.py b/lib/sqlalchemy/sql/type_api.py index 326f2be8bb5..49f6cfe204a 100644 --- a/lib/sqlalchemy/sql/type_api.py +++ b/lib/sqlalchemy/sql/type_api.py @@ -969,18 +969,23 @@ def bind_processor(self, dialect): @property def _static_cache_key(self): - if self.cache_ok is None: + cache_ok = self.__class__.__dict__.get("cache_ok", None) + + if cache_ok is None: subtype_idx = self.__class__.__mro__.index(ExternalType) subtype = self.__class__.__mro__[max(subtype_idx - 1, 0)] util.warn( "%s %r will not produce a cache key because " - "the ``cache_ok`` flag is not set to True. " - "Set this flag to True if this type object's " + "the ``cache_ok`` attribute is not set to True. 
This can " + "have significant performance implications including some " + "performance degradations in comparison to prior SQLAlchemy " + "versions. Set this attribute to True if this type object's " "state is safe to use in a cache key, or False to " - "disable this warning." % (subtype.__name__, self) + "disable this warning." % (subtype.__name__, self), + code="cprf", ) - elif self.cache_ok is True: + elif cache_ok is True: return super(ExternalType, self)._static_cache_key return NO_CACHE diff --git a/lib/sqlalchemy/testing/assertions.py b/lib/sqlalchemy/testing/assertions.py index 6bf14aecde9..ea453813a55 100644 --- a/lib/sqlalchemy/testing/assertions.py +++ b/lib/sqlalchemy/testing/assertions.py @@ -552,6 +552,15 @@ def _compiler_dispatch(self, compiler, **kwargs): # are the "self.statement" element c = CheckCompilerAccess(clause).compile(dialect=dialect, **kw) + if isinstance(clause, sqltypes.TypeEngine): + cache_key_no_warnings = clause._static_cache_key + if cache_key_no_warnings: + hash(cache_key_no_warnings) + else: + cache_key_no_warnings = clause._generate_cache_key() + if cache_key_no_warnings: + hash(cache_key_no_warnings[0]) + param_str = repr(getattr(c, "params", {})) if util.py3k: param_str = param_str.encode("utf-8").decode("ascii", "ignore") diff --git a/test/dialect/mssql/test_compiler.py b/test/dialect/mssql/test_compiler.py index 1f76e0969e7..bad5e4e10b6 100644 --- a/test/dialect/mssql/test_compiler.py +++ b/test/dialect/mssql/test_compiler.py @@ -181,7 +181,7 @@ def test_update_exclude_hint(self): t.update() .where(t.c.somecolumn == "q") .values(somecolumn="x") - .with_hint("XYZ", "mysql"), + .with_hint("XYZ", dialect_name="mysql"), "UPDATE sometable SET somecolumn=:somecolumn " "WHERE sometable.somecolumn = :somecolumn_1", ) diff --git a/test/engine/test_execute.py b/test/engine/test_execute.py index bb90f66d340..f462a7035cc 100644 --- a/test/engine/test_execute.py +++ b/test/engine/test_execute.py @@ -3964,6 +3964,7 @@ def 
translate_select_structure(self, select_stmt, **kwargs): class MyDialect(SQLiteDialect_pysqlite): statement_compiler = MyCompiler + supports_statement_cache = False from sqlalchemy.dialects import registry diff --git a/test/ext/test_baked.py b/test/ext/test_baked.py index 977fb363909..a7fb1ec766e 100644 --- a/test/ext/test_baked.py +++ b/test/ext/test_baked.py @@ -1043,6 +1043,7 @@ def _option_fixture(self): from sqlalchemy.orm.interfaces import UserDefinedOption class RelationshipCache(UserDefinedOption): + inherit_cache = True propagate_to_loaders = True diff --git a/test/ext/test_compiler.py b/test/ext/test_compiler.py index 7fb0213292c..99679712200 100644 --- a/test/ext/test_compiler.py +++ b/test/ext/test_compiler.py @@ -37,6 +37,8 @@ class UserDefinedTest(fixtures.TestBase, AssertsCompiledSQL): def test_column(self): class MyThingy(ColumnClause): + inherit_cache = False + def __init__(self, arg=None): super(MyThingy, self).__init__(arg or "MYTHINGY!") @@ -96,7 +98,7 @@ def visit_pg_type(type_, compiler, **kw): def test_no_compile_for_col_label(self): class MyThingy(FunctionElement): - pass + inherit_cache = True @compiles(MyThingy) def visit_thingy(thingy, compiler, **kw): @@ -120,6 +122,8 @@ def visit_thingy_pg(thingy, compiler, **kw): def test_stateful(self): class MyThingy(ColumnClause): + inherit_cache = False + def __init__(self): super(MyThingy, self).__init__("MYTHINGY!") @@ -142,6 +146,8 @@ def visit_thingy(thingy, compiler, **kw): def test_callout_to_compiler(self): class InsertFromSelect(ClauseElement): + inherit_cache = False + def __init__(self, table, select): self.table = table self.select = select @@ -162,7 +168,7 @@ def visit_insert_from_select(element, compiler, **kw): def test_no_default_but_has_a_visit(self): class MyThingy(ColumnClause): - pass + inherit_cache = False @compiles(MyThingy, "postgresql") def visit_thingy(thingy, compiler, **kw): @@ -172,7 +178,7 @@ def visit_thingy(thingy, compiler, **kw): def 
test_no_default_has_no_visit(self): class MyThingy(TypeEngine): - pass + inherit_cache = False @compiles(MyThingy, "postgresql") def visit_thingy(thingy, compiler, **kw): @@ -189,6 +195,7 @@ def visit_thingy(thingy, compiler, **kw): @testing.combinations((True,), (False,)) def test_no_default_proxy_generation(self, named): class my_function(FunctionElement): + inherit_cache = False if named: name = "my_function" type = Numeric() @@ -215,7 +222,7 @@ def sqlite_my_function(element, compiler, **kw): def test_no_default_message(self): class MyThingy(ClauseElement): - pass + inherit_cache = False @compiles(MyThingy, "postgresql") def visit_thingy(thingy, compiler, **kw): @@ -314,7 +321,7 @@ def test_functions(self): from sqlalchemy.dialects import postgresql class MyUtcFunction(FunctionElement): - pass + inherit_cache = True @compiles(MyUtcFunction) def visit_myfunc(element, compiler, **kw): @@ -335,7 +342,7 @@ def visit_myfunc_pg(element, compiler, **kw): def test_functions_args_noname(self): class myfunc(FunctionElement): - pass + inherit_cache = True @compiles(myfunc) def visit_myfunc(element, compiler, **kw): @@ -351,6 +358,7 @@ def test_function_calls_base(self): class greatest(FunctionElement): type = Numeric() name = "greatest" + inherit_cache = True @compiles(greatest) def default_greatest(element, compiler, **kw): @@ -380,12 +388,15 @@ def case_greatest(element, compiler, **kw): def test_function_subclasses_one(self): class Base(FunctionElement): + inherit_cache = True name = "base" class Sub1(Base): + inherit_cache = True name = "sub1" class Sub2(Base): + inherit_cache = True name = "sub2" @compiles(Base) @@ -407,6 +418,7 @@ class Base(FunctionElement): name = "base" class Sub1(Base): + inherit_cache = True name = "sub1" @compiles(Base) @@ -414,9 +426,11 @@ def visit_base(element, compiler, **kw): return element.name class Sub2(Base): + inherit_cache = True name = "sub2" class SubSub1(Sub1): + inherit_cache = True name = "subsub1" self.assert_compile( @@ 
-545,7 +559,7 @@ def define_tables(cls, metadata): @testing.fixture() def insert_fixture(self): class MyInsert(Executable, ClauseElement): - pass + inherit_cache = True @compiles(MyInsert) def _run_myinsert(element, compiler, **kw): @@ -556,7 +570,7 @@ def _run_myinsert(element, compiler, **kw): @testing.fixture() def select_fixture(self): class MySelect(Executable, ClauseElement): - pass + inherit_cache = True @compiles(MySelect) def _run_myinsert(element, compiler, **kw): diff --git a/test/orm/inheritance/test_assorted_poly.py b/test/orm/inheritance/test_assorted_poly.py index 729e1ee0479..3d17d702382 100644 --- a/test/orm/inheritance/test_assorted_poly.py +++ b/test/orm/inheritance/test_assorted_poly.py @@ -2252,7 +2252,7 @@ class A(Base): id = Column(Integer, primary_key=True) class MySpecialColumn(Column): - pass + inherit_cache = True class B(A): __tablename__ = "b" diff --git a/test/orm/test_cache_key.py b/test/orm/test_cache_key.py index 7fb232b0b87..f42e59216a0 100644 --- a/test/orm/test_cache_key.py +++ b/test/orm/test_cache_key.py @@ -26,6 +26,7 @@ from sqlalchemy.orm import selectinload from sqlalchemy.orm import Session from sqlalchemy.orm import subqueryload +from sqlalchemy.orm import synonym from sqlalchemy.orm import with_expression from sqlalchemy.orm import with_loader_criteria from sqlalchemy.orm import with_polymorphic @@ -387,6 +388,35 @@ def test_orm_query_using_with_entities(self): compare_values=True, ) + def test_synonyms(self, registry): + """test for issue discovered in #7394""" + + @registry.mapped + class User2(object): + __table__ = self.tables.users + + name_syn = synonym("name") + + @registry.mapped + class Address2(object): + __table__ = self.tables.addresses + + name_syn = synonym("email_address") + + self._run_cache_key_fixture( + lambda: ( + User2.id, + User2.name, + User2.name_syn, + Address2.name_syn, + Address2.email_address, + aliased(User2).name_syn, + aliased(User2, name="foo").name_syn, + aliased(User2, 
name="bar").name_syn, + ), + compare_values=True, + ) + def test_more_with_entities_sanity_checks(self): """test issue #6503""" User, Address, Keyword, Order, Item = self.classes( diff --git a/test/orm/test_lambdas.py b/test/orm/test_lambdas.py index 5274271d9fa..6de702ad4b0 100644 --- a/test/orm/test_lambdas.py +++ b/test/orm/test_lambdas.py @@ -219,7 +219,7 @@ def test_lambdas_rejected_in_options(self, plain_fixture): assert_raises_message( exc.ArgumentError, - "Cacheable Core or ORM object expected, got", + "ExecutionOption Core or ORM object expected, got", select(lambda: User).options, lambda: subqueryload(User.addresses), ) diff --git a/test/orm/test_query.py b/test/orm/test_query.py index 433c11afc90..8bf3dcdb5c5 100644 --- a/test/orm/test_query.py +++ b/test/orm/test_query.py @@ -2113,6 +2113,7 @@ def test_function_element_column_labels(self): class max_(expression.FunctionElement): name = "max" + inherit_cache = True @compiles(max_) def visit_max(element, compiler, **kw): @@ -2127,6 +2128,7 @@ def test_truly_unlabeled_sql_expressions(self): class not_named_max(expression.ColumnElement): name = "not_named_max" + inherit_cache = True @compiles(not_named_max) def visit_max(element, compiler, **kw): diff --git a/test/sql/test_compare.py b/test/sql/test_compare.py index fe3512375ac..a5252601c42 100644 --- a/test/sql/test_compare.py +++ b/test/sql/test_compare.py @@ -16,6 +16,7 @@ from sqlalchemy import literal_column from sqlalchemy import MetaData from sqlalchemy import or_ +from sqlalchemy import PickleType from sqlalchemy import select from sqlalchemy import String from sqlalchemy import Table @@ -1265,13 +1266,20 @@ class Foobar2(ColumnElement): # the None for cache key will prevent objects # which contain these elements from being cached. 
f1 = Foobar1() - eq_(f1._generate_cache_key(), None) + with expect_warnings( + "Class Foobar1 will not make use of SQL compilation caching" + ): + eq_(f1._generate_cache_key(), None) f2 = Foobar2() - eq_(f2._generate_cache_key(), None) + with expect_warnings( + "Class Foobar2 will not make use of SQL compilation caching" + ): + eq_(f2._generate_cache_key(), None) s1 = select(column("q"), Foobar2()) + # warning is memoized, won't happen the second time eq_(s1._generate_cache_key(), None) def test_get_children_no_method(self): @@ -1356,6 +1364,7 @@ def test_all_present(self): and ( "__init__" in cls.__dict__ or issubclass(cls, AliasedReturnsRows) + or "inherit_cache" not in cls.__dict__ ) and not issubclass(cls, (Annotated)) and "orm" not in cls.__module__ @@ -1820,3 +1829,69 @@ def go3(): eq_(c1, c2) ne_(c1, c3) eq_(c1, c4) + + def test_thirdparty_sub_subclass_no_cache(self): + class MyType(PickleType): + pass + + expr = column("q", MyType()) == 1 + + with expect_warnings( + r"TypeDecorator MyType\(\) will not produce a cache key" + ): + is_(expr._generate_cache_key(), None) + + def test_userdefined_sub_subclass_no_cache(self): + class MyType(UserDefinedType): + cache_ok = True + + class MySubType(MyType): + pass + + expr = column("q", MySubType()) == 1 + + with expect_warnings( + r"UserDefinedType MySubType\(\) will not produce a cache key" + ): + is_(expr._generate_cache_key(), None) + + def test_userdefined_sub_subclass_cache_ok(self): + class MyType(UserDefinedType): + cache_ok = True + + class MySubType(MyType): + cache_ok = True + + def go1(): + expr = column("q", MySubType()) == 1 + return expr + + def go2(): + expr = column("p", MySubType()) == 1 + return expr + + c1 = go1()._generate_cache_key()[0] + c2 = go1()._generate_cache_key()[0] + c3 = go2()._generate_cache_key()[0] + + eq_(c1, c2) + ne_(c1, c3) + + def test_thirdparty_sub_subclass_cache_ok(self): + class MyType(PickleType): + cache_ok = True + + def go1(): + expr = column("q", MyType()) == 1 + 
return expr + + def go2(): + expr = column("p", MyType()) == 1 + return expr + + c1 = go1()._generate_cache_key()[0] + c2 = go1()._generate_cache_key()[0] + c3 = go2()._generate_cache_key()[0] + + eq_(c1, c2) + ne_(c1, c3) diff --git a/test/sql/test_functions.py b/test/sql/test_functions.py index f3fb724c073..27f1b897420 100644 --- a/test/sql/test_functions.py +++ b/test/sql/test_functions.py @@ -86,6 +86,7 @@ def test_compile(self): # test generic function compile class fake_func(GenericFunction): + inherit_cache = True __return_type__ = sqltypes.Integer def __init__(self, arg, **kwargs): @@ -112,6 +113,7 @@ def test_operators_custom(self, op, other, expected, use_custom): if use_custom: class MyFunc(FunctionElement): + inherit_cache = True name = "myfunc" type = Integer() @@ -140,6 +142,7 @@ def test_use_labels(self): def test_use_labels_function_element(self): class max_(FunctionElement): name = "max" + inherit_cache = True @compiles(max_) def visit_max(element, compiler, **kw): @@ -265,7 +268,7 @@ def test_annotation_dialect_specific(self): def test_custom_default_namespace(self): class myfunc(GenericFunction): - pass + inherit_cache = True assert isinstance(func.myfunc(), myfunc) self.assert_compile(func.myfunc(), "myfunc()") @@ -273,6 +276,7 @@ class myfunc(GenericFunction): def test_custom_type(self): class myfunc(GenericFunction): type = DateTime + inherit_cache = True assert isinstance(func.myfunc().type, DateTime) self.assert_compile(func.myfunc(), "myfunc()") @@ -280,12 +284,14 @@ class myfunc(GenericFunction): def test_custom_legacy_type(self): # in case someone was using this system class myfunc(GenericFunction): + inherit_cache = True __return_type__ = DateTime assert isinstance(func.myfunc().type, DateTime) def test_case_sensitive(self): class MYFUNC(GenericFunction): + inherit_cache = True type = DateTime assert isinstance(func.MYFUNC().type, DateTime) @@ -341,6 +347,7 @@ class replaceable_func_override(GenericFunction): def 
test_custom_w_custom_name(self): class myfunc(GenericFunction): + inherit_cache = True name = "notmyfunc" assert isinstance(func.notmyfunc(), myfunc) @@ -348,6 +355,7 @@ class myfunc(GenericFunction): def test_custom_w_quoted_name(self): class myfunc(GenericFunction): + inherit_cache = True name = quoted_name("NotMyFunc", quote=True) identifier = "myfunc" @@ -355,6 +363,7 @@ class myfunc(GenericFunction): def test_custom_w_quoted_name_no_identifier(self): class myfunc(GenericFunction): + inherit_cache = True name = quoted_name("NotMyFunc", quote=True) # note this requires that the quoted name be lower cased for @@ -364,6 +373,7 @@ class myfunc(GenericFunction): def test_custom_package_namespace(self): def cls1(pk_name): class myfunc(GenericFunction): + inherit_cache = True package = pk_name return myfunc @@ -377,6 +387,7 @@ class myfunc(GenericFunction): def test_custom_name(self): class MyFunction(GenericFunction): name = "my_func" + inherit_cache = True def __init__(self, *args): args = args + (3,) @@ -392,20 +403,24 @@ class GeoBuffer(GenericFunction): package = "geo" name = "BufferOne" identifier = "buf1" + inherit_cache = True class GeoBuffer2(GenericFunction): type = Integer name = "BufferTwo" identifier = "buf2" + inherit_cache = True class BufferThree(GenericFunction): type = Integer identifier = "buf3" + inherit_cache = True class GeoBufferFour(GenericFunction): type = Integer name = "BufferFour" identifier = "Buf4" + inherit_cache = True self.assert_compile(func.geo.buf1(), "BufferOne()") self.assert_compile(func.buf2(), "BufferTwo()") @@ -418,7 +433,7 @@ class GeoBufferFour(GenericFunction): def test_custom_args(self): class myfunc(GenericFunction): - pass + inherit_cache = True self.assert_compile( myfunc(1, 2, 3), "myfunc(:myfunc_1, :myfunc_2, :myfunc_3)" @@ -1015,6 +1030,7 @@ def test_conn_execute(self, connection): from sqlalchemy.ext.compiler import compiles class myfunc(FunctionElement): + inherit_cache = True type = Date() @compiles(myfunc) diff 
--git a/test/sql/test_labels.py b/test/sql/test_labels.py index 535d4dd0be8..8c8e9dbeda3 100644 --- a/test/sql/test_labels.py +++ b/test/sql/test_labels.py @@ -805,6 +805,8 @@ class ColExprLabelTest(fixtures.TestBase, AssertsCompiledSQL): def _fixture(self): class SomeColThing(WrapsColumnExpression, ColumnElement): + inherit_cache = False + def __init__(self, expression): self.clause = coercions.expect( roles.ExpressionElementRole, expression diff --git a/test/sql/test_lambdas.py b/test/sql/test_lambdas.py index a2aa9705cb1..76be0af3cea 100644 --- a/test/sql/test_lambdas.py +++ b/test/sql/test_lambdas.py @@ -17,6 +17,7 @@ from sqlalchemy.sql import select from sqlalchemy.sql import table from sqlalchemy.sql import util as sql_util +from sqlalchemy.sql.base import ExecutableOption from sqlalchemy.sql.traversals import HasCacheKey from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import AssertsCompiledSQL @@ -810,7 +811,10 @@ def test_stmt_lambda_w_set_of_opts(self): stmt = lambdas.lambda_stmt(lambda: select(column("x"))) - opts = {column("x"), column("y")} + class MyUncacheable(ExecutableOption): + pass + + opts = {MyUncacheable()} assert_raises_message( exc.InvalidRequestError, @@ -942,11 +946,18 @@ def go(opts): return stmt - s1 = go([column("a"), column("b")]) + class SomeOpt(HasCacheKey, ExecutableOption): + def __init__(self, x): + self.x = x + + def _gen_cache_key(self, anon_map, bindparams): + return (SomeOpt, self.x) + + s1 = go([SomeOpt("a"), SomeOpt("b")]) - s2 = go([column("a"), column("b")]) + s2 = go([SomeOpt("a"), SomeOpt("b")]) - s3 = go([column("q"), column("b")]) + s3 = go([SomeOpt("q"), SomeOpt("b")]) s1key = s1._generate_cache_key() s2key = s2._generate_cache_key() @@ -964,7 +975,7 @@ def go(opts): return stmt - class SomeOpt(HasCacheKey): + class SomeOpt(HasCacheKey, ExecutableOption): def _gen_cache_key(self, anon_map, bindparams): return ("fixed_key",) @@ -994,8 +1005,8 @@ def go(opts): return stmt - class 
SomeOpt(HasCacheKey): - pass + class SomeOpt(HasCacheKey, ExecutableOption): + inherit_cache = False # generates no key, will not be cached eq_(SomeOpt()._generate_cache_key(), None) diff --git a/test/sql/test_operators.py b/test/sql/test_operators.py index 0e6f4f2d96c..c04078f7372 100644 --- a/test/sql/test_operators.py +++ b/test/sql/test_operators.py @@ -656,6 +656,8 @@ class ExtensionOperatorTest(fixtures.TestBase, testing.AssertsCompiledSQL): def test_contains(self): class MyType(UserDefinedType): + cache_ok = True + class comparator_factory(UserDefinedType.Comparator): def contains(self, other, **kw): return self.op("->")(other) @@ -664,6 +666,8 @@ def contains(self, other, **kw): def test_getitem(self): class MyType(UserDefinedType): + cache_ok = True + class comparator_factory(UserDefinedType.Comparator): def __getitem__(self, index): return self.op("->")(index) @@ -682,6 +686,8 @@ def __getitem__(self, index): def test_lshift(self): class MyType(UserDefinedType): + cache_ok = True + class comparator_factory(UserDefinedType.Comparator): def __lshift__(self, other): return self.op("->")(other) @@ -690,6 +696,8 @@ def __lshift__(self, other): def test_rshift(self): class MyType(UserDefinedType): + cache_ok = True + class comparator_factory(UserDefinedType.Comparator): def __rshift__(self, other): return self.op("->")(other) diff --git a/test/sql/test_resultset.py b/test/sql/test_resultset.py index d07f81facee..89317b149ff 100644 --- a/test/sql/test_resultset.py +++ b/test/sql/test_resultset.py @@ -1798,6 +1798,7 @@ def _test_keyed_targeting_no_label_at_all(self, expression, conn): def test_keyed_targeting_no_label_at_all_one(self, connection): class not_named_max(expression.ColumnElement): name = "not_named_max" + inherit_cache = True @compiles(not_named_max) def visit_max(element, compiler, **kw): @@ -1815,6 +1816,7 @@ def visit_max(element, compiler, **kw): def test_keyed_targeting_no_label_at_all_two(self, connection): class 
not_named_max(expression.ColumnElement): name = "not_named_max" + inherit_cache = True @compiles(not_named_max) def visit_max(element, compiler, **kw): diff --git a/test/sql/test_types.py b/test/sql/test_types.py index ffa6f922ed2..14b1ca1051b 100644 --- a/test/sql/test_types.py +++ b/test/sql/test_types.py @@ -1550,6 +1550,8 @@ def process(value): return process class UTypeThree(types.UserDefinedType): + cache_ok = True + def get_col_spec(self): return "UTYPETHREE" From 1f8aee1442a22c9e65c91998684714802aa6fc44 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 9 Dec 2021 12:36:25 -0500 Subject: [PATCH 048/632] changelog updates Change-Id: Ie136cfb7375e68b2badba1099b5b041f99da85fa (cherry picked from commit af50c8064d668ba33ef2399a288fd1594b1b5602) --- doc/build/changelog/unreleased_14/7311.rst | 2 +- doc/build/changelog/unreleased_14/7319.rst | 2 +- doc/build/conf.py | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/build/changelog/unreleased_14/7311.rst b/doc/build/changelog/unreleased_14/7311.rst index bdf1a45daa8..a04ec65d479 100644 --- a/doc/build/changelog/unreleased_14/7311.rst +++ b/doc/build/changelog/unreleased_14/7311.rst @@ -1,5 +1,5 @@ .. change:: - :tags: bug, setup + :tags: bug, platform :tickets: 7311 Python 3.10 has deprecated "distutils" in favor of explicit use of diff --git a/doc/build/changelog/unreleased_14/7319.rst b/doc/build/changelog/unreleased_14/7319.rst index 0c2b19d3148..48da931a774 100644 --- a/doc/build/changelog/unreleased_14/7319.rst +++ b/doc/build/changelog/unreleased_14/7319.rst @@ -1,5 +1,5 @@ .. 
change:: - :tags: bug, types, regression + :tags: bug, sql, regression :tickets: 7319 Extended the :attr:`.TypeDecorator.cache_ok` attribute and corresponding diff --git a/doc/build/conf.py b/doc/build/conf.py index d4b6bea0368..1c4cdbfb306 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -71,6 +71,7 @@ "mssql", "oracle", "firebird", + "tests", ] # tags to sort on inside of sections changelog_inner_tag_sort = [ From 09499bfa6fadc98c92ee0cc4588e14dc27152f5d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 9 Dec 2021 12:51:43 -0500 Subject: [PATCH 049/632] implement attributes.Proxy._clone() Fixed issue where the internal cloning used by the :meth:`_orm.PropComparator.any` method on a :func:`_orm.relationship` in the case where the related class also makes use of ORM polymorphic loading, would fail if a hybrid property on the related, polymorphic class were used within the criteria for the ``any()`` operation. Fixes: #7425 Change-Id: I5f4f4ec5fab17df228bc6e3de412d24114b20600 (cherry picked from commit 546391e5a80f647e7ad78ef93f832f10278a8867) --- doc/build/changelog/unreleased_14/7425.rst | 9 ++++ lib/sqlalchemy/orm/attributes.py | 10 ++++ test/ext/test_hybrid.py | 61 ++++++++++++++++++++++ 3 files changed, 80 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7425.rst diff --git a/doc/build/changelog/unreleased_14/7425.rst b/doc/build/changelog/unreleased_14/7425.rst new file mode 100644 index 00000000000..24b48ef444f --- /dev/null +++ b/doc/build/changelog/unreleased_14/7425.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, orm, ext + :tickets: 7425 + + Fixed issue where the internal cloning used by the + :meth:`_orm.PropComparator.any` method on a :func:`_orm.relationship` in + the case where the related class also makes use of ORM polymorphic loading, + would fail if a hybrid property on the related, polymorphic class were used + within the criteria for the ``any()`` operation. 
diff --git a/lib/sqlalchemy/orm/attributes.py b/lib/sqlalchemy/orm/attributes.py index 9d1e27d977e..19e0d545e6d 100644 --- a/lib/sqlalchemy/orm/attributes.py +++ b/lib/sqlalchemy/orm/attributes.py @@ -574,6 +574,16 @@ def adapt_to_entity(self, adapt_to_entity): adapt_to_entity, ) + def _clone(self, **kw): + return self.__class__( + self.class_, + self.key, + self.descriptor, + self._comparator, + adapt_to_entity=self._adapt_to_entity, + original_property=self.original_property, + ) + def __get__(self, instance, owner): retval = self.descriptor.__get__(instance, owner) # detect if this is a plain Python @property, which just returns diff --git a/test/ext/test_hybrid.py b/test/ext/test_hybrid.py index ad8d92b9b37..be42fdb6d0e 100644 --- a/test/ext/test_hybrid.py +++ b/test/ext/test_hybrid.py @@ -245,6 +245,67 @@ class B(Base): return A, B + @testing.fixture + def _related_polymorphic_attr_fixture(self): + """test for #7425""" + + Base = declarative_base() + + class A(Base): + __tablename__ = "a" + id = Column(Integer, primary_key=True) + + bs = relationship("B", back_populates="a", lazy="joined") + + class B(Base): + __tablename__ = "poly" + __mapper_args__ = { + "polymorphic_on": "type", + # if with_polymorphic is removed, issue does not occur + "with_polymorphic": "*", + } + name = Column(String, primary_key=True) + type = Column(String) + a_id = Column(ForeignKey(A.id)) + + a = relationship(A, back_populates="bs") + + @hybrid.hybrid_property + def is_foo(self): + return self.name == "foo" + + return A, B + + def test_cloning_in_polymorphic_any( + self, _related_polymorphic_attr_fixture + ): + A, B = _related_polymorphic_attr_fixture + + session = fixture_session() + + # in the polymorphic case, A.bs.any() does a traverse() / clone() + # on the expression. so the proxedattribute coming from the hybrid + # has to support this. 
+ + self.assert_compile( + session.query(A).filter(A.bs.any(B.name == "foo")), + "SELECT a.id AS a_id, poly_1.name AS poly_1_name, poly_1.type " + "AS poly_1_type, poly_1.a_id AS poly_1_a_id FROM a " + "LEFT OUTER JOIN poly AS poly_1 ON a.id = poly_1.a_id " + "WHERE EXISTS (SELECT 1 FROM poly WHERE a.id = poly.a_id " + "AND poly.name = :name_1)", + ) + + # SQL should be identical + self.assert_compile( + session.query(A).filter(A.bs.any(B.is_foo)), + "SELECT a.id AS a_id, poly_1.name AS poly_1_name, poly_1.type " + "AS poly_1_type, poly_1.a_id AS poly_1_a_id FROM a " + "LEFT OUTER JOIN poly AS poly_1 ON a.id = poly_1.a_id " + "WHERE EXISTS (SELECT 1 FROM poly WHERE a.id = poly.a_id " + "AND poly.name = :name_1)", + ) + @testing.fixture def _unnamed_expr_fixture(self): Base = declarative_base() From 5ae1c14f4c8cd1629b1730cd146cb2a71561b5e2 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 9 Dec 2021 14:23:42 -0500 Subject: [PATCH 050/632] implement correct errors for Row immutability Corrected the error message for the ``AttributeError`` that's raised when attempting to write to an attribute on the :class:`_result.Row` class, which is immutable. The previous message claimed the column didn't exist which is misleading. Fixes: #7432 Change-Id: If0e2cbd3f763dca6c99a18aa42252c69f1207d59 (cherry picked from commit f113e979219e20a22044c4b262e4531ba9993b8a) --- doc/build/changelog/unreleased_14/7432.rst | 8 +++ lib/sqlalchemy/engine/row.py | 37 +++++++---- test/sql/test_resultset.py | 74 ++++++++++++++++++++++ 3 files changed, 106 insertions(+), 13 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7432.rst diff --git a/doc/build/changelog/unreleased_14/7432.rst b/doc/build/changelog/unreleased_14/7432.rst new file mode 100644 index 00000000000..6e3f74c6710 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7432.rst @@ -0,0 +1,8 @@ +.. 
change:: + :tags: bug, engine + :tickets: 7432 + + Corrected the error message for the ``AttributeError`` that's raised when + attempting to write to an attribute on the :class:`_result.Row` class, + which is immutable. The previous message claimed the column didn't exist + which is misleading. diff --git a/lib/sqlalchemy/engine/row.py b/lib/sqlalchemy/engine/row.py index dc11e354862..02fc560ca4d 100644 --- a/lib/sqlalchemy/engine/row.py +++ b/lib/sqlalchemy/engine/row.py @@ -66,21 +66,25 @@ class BaseRow(object): def __init__(self, parent, processors, keymap, key_style, data): """Row objects are constructed by CursorResult objects.""" - self._parent = parent + object.__setattr__(self, "_parent", parent) if processors: - self._data = tuple( - [ - proc(value) if proc else value - for proc, value in zip(processors, data) - ] + object.__setattr__( + self, + "_data", + tuple( + [ + proc(value) if proc else value + for proc, value in zip(processors, data) + ] + ), ) else: - self._data = tuple(data) + object.__setattr__(self, "_data", tuple(data)) - self._keymap = keymap + object.__setattr__(self, "_keymap", keymap) - self._key_style = key_style + object.__setattr__(self, "_key_style", key_style) def __reduce__(self): return ( @@ -211,6 +215,12 @@ class Row(BaseRow, collections_abc.Sequence): # in 2.0, this should be KEY_INTEGER_ONLY _default_key_style = KEY_OBJECTS_BUT_WARN + def __setattr__(self, name, value): + raise AttributeError("can't set attribute") + + def __delattr__(self, name): + raise AttributeError("can't delete attribute") + @property def _mapping(self): """Return a :class:`.RowMapping` for this :class:`.Row`. 
@@ -269,10 +279,11 @@ def __getstate__(self): } def __setstate__(self, state): - self._parent = parent = state["_parent"] - self._data = state["_data"] - self._keymap = parent._keymap - self._key_style = state["_key_style"] + parent = state["_parent"] + object.__setattr__(self, "_parent", parent) + object.__setattr__(self, "_data", state["_data"]) + object.__setattr__(self, "_keymap", parent._keymap) + object.__setattr__(self, "_key_style", state["_key_style"]) def _op(self, other, op): return ( diff --git a/test/sql/test_resultset.py b/test/sql/test_resultset.py index 89317b149ff..088f5807474 100644 --- a/test/sql/test_resultset.py +++ b/test/sql/test_resultset.py @@ -637,6 +637,80 @@ def test_column_accessor_unary(self, connection): eq_(r._mapping[users.c.user_name], "john") eq_(r.user_name, "john") + @testing.fixture + def _ab_row_fixture(self, connection): + r = connection.execute( + select(literal(1).label("a"), literal(2).label("b")) + ).first() + return r + + def test_named_tuple_access(self, _ab_row_fixture): + r = _ab_row_fixture + eq_(r.a, 1) + eq_(r.b, 2) + + def test_named_tuple_missing_attr(self, _ab_row_fixture): + r = _ab_row_fixture + with expect_raises_message( + AttributeError, "Could not locate column in row for column 'c'" + ): + r.c + + def test_named_tuple_no_delete_present(self, _ab_row_fixture): + r = _ab_row_fixture + with expect_raises_message(AttributeError, "can't delete attribute"): + del r.a + + def test_named_tuple_no_delete_missing(self, _ab_row_fixture): + r = _ab_row_fixture + # including for non-existent attributes + with expect_raises_message(AttributeError, "can't delete attribute"): + del r.c + + def test_named_tuple_no_assign_present(self, _ab_row_fixture): + r = _ab_row_fixture + with expect_raises_message(AttributeError, "can't set attribute"): + r.a = 5 + + with expect_raises_message(AttributeError, "can't set attribute"): + r.a += 5 + + def test_named_tuple_no_assign_missing(self, _ab_row_fixture): + r = _ab_row_fixture + 
# including for non-existent attributes + with expect_raises_message(AttributeError, "can't set attribute"): + r.c = 5 + + def test_named_tuple_no_self_assign_missing(self, _ab_row_fixture): + r = _ab_row_fixture + with expect_raises_message( + AttributeError, "Could not locate column in row for column 'c'" + ): + r.c += 5 + + def test_mapping_tuple_readonly_errors(self, connection): + r = connection.execute( + select(literal(1).label("a"), literal(2).label("b")) + ).first() + r = r._mapping + eq_(r["a"], 1) + eq_(r["b"], 2) + + with expect_raises_message( + KeyError, "Could not locate column in row for column 'c'" + ): + r["c"] + + with expect_raises_message( + TypeError, "'RowMapping' object does not support item assignment" + ): + r["a"] = 5 + + with expect_raises_message( + TypeError, "'RowMapping' object does not support item assignment" + ): + r["a"] += 5 + def test_column_accessor_err(self, connection): r = connection.execute(select(1)).first() assert_raises_message( From c4e99629270ec665cb9f010d4b7531c4605b5873 Mon Sep 17 00:00:00 2001 From: Lele Gaifax Date: Thu, 9 Dec 2021 08:14:06 +0100 Subject: [PATCH 051/632] Fix typo in docstring (cherry picked from commit 9fd1bb4e8bf8a21f8c20b4390a9e7d579556ffc0) --- lib/sqlalchemy/sql/selectable.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index 6004349ac70..7d18113219e 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -6597,7 +6597,7 @@ class Exists(UnaryExpression): See :func:`_sql.exists` for a description of usage. - An ``EXISTS`` clase can also be constructed from a :func:`_sql.select` + An ``EXISTS`` clause can also be constructed from a :func:`_sql.select` instance by calling :meth:`_sql.SelectBase.exists`. 
""" From 89ef7c226731882d8bdc5964718ad6350b22a9e0 Mon Sep 17 00:00:00 2001 From: Lele Gaifax Date: Thu, 9 Dec 2021 08:13:42 +0100 Subject: [PATCH 052/632] Fix typo in changelog --- doc/build/changelog/unreleased_14/7386.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/changelog/unreleased_14/7386.rst b/doc/build/changelog/unreleased_14/7386.rst index e344453c8c4..2a902286883 100644 --- a/doc/build/changelog/unreleased_14/7386.rst +++ b/doc/build/changelog/unreleased_14/7386.rst @@ -3,5 +3,5 @@ :tickets: 7386 Support multiple clause elements in the :meth:`_sql.Exists.where` method, - unifying the api with the on presented by a normal :func:`_sql.select` + unifying the api with the one presented by a normal :func:`_sql.select` construct. From e6872c5c46718aef49d64963b4fe0db773056963 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 9 Dec 2021 14:35:43 -0500 Subject: [PATCH 053/632] - 1.4.28 --- doc/build/changelog/changelog_14.rst | 215 +++++++++++++++++- doc/build/changelog/unreleased_14/4390.rst | 9 - doc/build/changelog/unreleased_14/7259.rst | 13 -- doc/build/changelog/unreleased_14/7311.rst | 12 - doc/build/changelog/unreleased_14/7318.rst | 13 -- doc/build/changelog/unreleased_14/7319.rst | 24 -- doc/build/changelog/unreleased_14/7321.rst | 16 -- doc/build/changelog/unreleased_14/7368.rst | 9 - doc/build/changelog/unreleased_14/7386.rst | 7 - doc/build/changelog/unreleased_14/7388.rst | 13 -- doc/build/changelog/unreleased_14/7389.rst | 8 - doc/build/changelog/unreleased_14/7394.rst | 49 ---- doc/build/changelog/unreleased_14/7400.rst | 6 - doc/build/changelog/unreleased_14/7425.rst | 9 - doc/build/changelog/unreleased_14/pytest7.rst | 11 - doc/build/conf.py | 4 +- 16 files changed, 216 insertions(+), 202 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/4390.rst delete mode 100644 doc/build/changelog/unreleased_14/7259.rst delete mode 100644 doc/build/changelog/unreleased_14/7311.rst delete mode 100644 
doc/build/changelog/unreleased_14/7318.rst delete mode 100644 doc/build/changelog/unreleased_14/7319.rst delete mode 100644 doc/build/changelog/unreleased_14/7321.rst delete mode 100644 doc/build/changelog/unreleased_14/7368.rst delete mode 100644 doc/build/changelog/unreleased_14/7386.rst delete mode 100644 doc/build/changelog/unreleased_14/7388.rst delete mode 100644 doc/build/changelog/unreleased_14/7389.rst delete mode 100644 doc/build/changelog/unreleased_14/7394.rst delete mode 100644 doc/build/changelog/unreleased_14/7400.rst delete mode 100644 doc/build/changelog/unreleased_14/7425.rst delete mode 100644 doc/build/changelog/unreleased_14/pytest7.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 758d925aa3f..f5c590e8bda 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,220 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.28 - :include_notes_from: unreleased_14 + :released: December 9, 2021 + + .. change:: + :tags: bug, mypy + :tickets: 7321 + + Fixed Mypy crash which would occur when using Mypy plugin against code + which made use of :class:`_orm.declared_attr` methods for non-mapped names + like ``__mapper_args__``, ``__table_args__``, or other dunder names, as the + plugin would try to interpret these as mapped attributes which would then + be later mis-handled. As part of this change, the decorated function is + still converted by the plugin into a generic assignment statement (e.g. + ``__mapper_args__: Any``) so that the argument signature can continue to be + annotated in the same way one would for any other ``@classmethod`` without + Mypy complaining about the wrong argument type for a method that isn't + explicitly ``@classmethod``. + + + + .. 
change:: + :tags: bug, orm, ext + :tickets: 7425 + + Fixed issue where the internal cloning used by the + :meth:`_orm.PropComparator.any` method on a :func:`_orm.relationship` in + the case where the related class also makes use of ORM polymorphic loading, + would fail if a hybrid property on the related, polymorphic class were used + within the criteria for the ``any()`` operation. + + .. change:: + :tags: bug, platform + :tickets: 7311 + + Python 3.10 has deprecated "distutils" in favor of explicit use of + "setuptools" in :pep:`632`; SQLAlchemy's setup.py has replaced imports + accordingly. However, since setuptools itself only recently added the + replacement symbols mentioned in pep-632 as of November of 2021 in version + 59.0.1, ``setup.py`` still has fallback imports to distutils, as SQLAlchemy + 1.4 does not have a hard setuptools versioning requirement at this time. + SQLAlchemy 2.0 is expected to use a full :pep:`517` installation layout + which will indicate appropriate setuptools versioning up front. + + .. change:: + :tags: bug, sql, regression + :tickets: 7319 + + Extended the :attr:`.TypeDecorator.cache_ok` attribute and corresponding + warning message if this flag is not defined, a behavior first established + for :class:`.TypeDecorator` as part of :ticket:`6436`, to also take place + for :class:`.UserDefinedType`, by generalizing the flag and associated + caching logic to a new common base for these two types, + :class:`.ExternalType` to create :attr:`.UserDefinedType.cache_ok`. + + The change means any current :class:`.UserDefinedType` will now cause SQL + statement caching to no longer take place for statements which make use of + the datatype, along with a warning being emitted, unless the class defines + the :attr:`.UserDefinedType.cache_ok` flag as True. 
If the datatype cannot + form a deterministic, hashable cache key derived from its arguments, + the attribute may be set to False which will continue to keep caching disabled but will suppress the + warning. In particular, custom datatypes currently used in packages such as + SQLAlchemy-utils will need to implement this flag. The issue was observed + as a result of a SQLAlchemy-utils datatype that is not currently cacheable. + + .. seealso:: + + :attr:`.ExternalType.cache_ok` + + .. change:: + :tags: deprecated, orm + :tickets: 4390 + + Deprecated an undocumented loader option syntax ``".*"``, which appears to + be no different than passing a single asterisk, and will emit a deprecation + warning if used. This syntax may have been intended for something but there + is currently no need for it. + + + .. change:: + :tags: bug, orm, mypy + :tickets: 7368 + + Fixed issue where the :func:`_orm.as_declarative` decorator and similar + functions used to generate the declarative base class would not copy the + ``__class_getitem__()`` method from a given superclass, which prevented the + use of pep-484 generics in conjunction with the ``Base`` class. Pull + request courtesy Kai Mueller. + + .. change:: + :tags: usecase, engine + :tickets: 7400 + + Added support for ``copy()`` and ``deepcopy()`` to the :class:`_url.URL` + class. Pull request courtesy Tom Ritchford. + + .. 
change:: + :tags: bug, orm, regression + :tickets: 7318 + + Fixed ORM regression where the new behavior of "eager loaders run on + unexpire" added in :ticket:`1763` would lead to loader option errors being + raised inappropriately for the case where a single :class:`_orm.Query` or + :class:`_sql.Select` were used to load multiple kinds of entities, along + with loader options that apply to just one of those kinds of entity like a + :func:`_orm.joinedload`, and later the objects would be refreshed from + expiration, where the loader options would attempt to be applied to the + mismatched object type and then raise an exception. The check for this + mismatch now bypasses raising an error for this case. + + .. change:: + :tags: bug, sql + :tickets: 7394 + + Custom SQL elements, third party dialects, custom or third party datatypes + will all generate consistent warnings when they do not clearly opt in or + out of SQL statement caching, which is achieved by setting the appropriate + attributes on each type of class. The warning links to documentation + sections which indicate the appropriate approach for each type of object in + order for caching to be enabled. + + .. change:: + :tags: bug, sql + :tickets: 7394 + + Fixed missing caching directives for a few lesser used classes in SQL Core + which would cause ``[no key]`` to be logged for elements which made use of + these. + + .. change:: + :tags: bug, postgresql + :tickets: 7394 + + Fixed missing caching directives for :class:`_postgresql.hstore` and + :class:`_postgresql.array` constructs which would cause ``[no key]`` + to be logged for these elements. + + .. change:: + :tags: bug, orm + :tickets: 7394 + + User defined ORM options, such as those illustrated in the dogpile.caching + example which subclass :class:`_orm.UserDefinedOption`, by definition are + handled on every statement execution and do not need to be considered as + part of the cache key for the statement. 
Caching of the base + :class:`.ExecutableOption` class has been modified so that it is no longer + a :class:`.HasCacheKey` subclass directly, so that the presence of user + defined option objects will not have the unwanted side effect of disabling + statement caching. Only ORM specific loader and criteria options, which are + all internal to SQLAlchemy, now participate within the caching system. + + .. change:: + :tags: bug, orm + :tickets: 7394 + + Fixed issue where mappings that made use of :func:`_orm.synonym` and + potentially other kinds of "proxy" attributes would not in all cases + successfully generate a cache key for their SQL statements, leading to + degraded performance for those statements. + + .. change:: + :tags: sql, usecase + :tickets: 7259 + + "Compound select" methods like :meth:`_sql.Select.union`, + :meth:`_sql.Select.intersect_all` etc. now accept ``*other`` as an argument + rather than ``other`` to allow for multiple additional SELECTs to be + compounded with the parent statement at once. In particular, the change as + applied to :meth:`_sql.CTE.union` and :meth:`_sql.CTE.union_all` now allow + for a so-called "non-linear CTE" to be created with the :class:`_sql.CTE` + construct, whereas previously there was no way to have more than two CTE + sub-elements in a UNION together while still correctly calling upon the CTE + in recursive fashion. Pull request courtesy Eric Masseran. + + .. change:: + :tags: bug, tests + + Implemented support for the test suite to run correctly under Pytest 7. + Previously, only Pytest 6.x was supported for Python 3, however the version + was not pinned on the upper bound in tox.ini. Pytest is not pinned in + tox.ini to be lower than version 8 so that SQLAlchemy versions released + with the current codebase will be able to be tested under tox without + changes to the environment. Much thanks to the Pytest developers for + their help with this issue. + + + .. 
change:: + :tags: orm, bug + :tickets: 7389 + + Fixed issue where a list mapped with :func:`_orm.relationship` would go + into an endless loop if in-place added to itself, i.e. the ``+=`` operator + were used, as well as if ``.extend()`` were given the same list. + + + .. change:: + :tags: usecase, sql + :tickets: 7386 + + Support multiple clause elements in the :meth:`_sql.Exists.where` method, + unifying the api with the one presented by a normal :func:`_sql.select` + construct. + + .. change:: + :tags: bug, orm + :tickets: 7388 + + Fixed issue where if an exception occurred when the :class:`_orm.Session` + were to close the connection within the :meth:`_orm.Session.commit` method, + when using a context manager for :meth:`_orm.Session.begin` , it would + attempt a rollback which would not be possible as the :class:`_orm.Session` + was in between where the transaction is committed and the connection is + then to be returned to the pool, raising the exception "this + sessiontransaction is in the committed state". This exception can occur + mostly in an asyncio context where CancelledError can be raised. + .. changelog:: :version: 1.4.27 diff --git a/doc/build/changelog/unreleased_14/4390.rst b/doc/build/changelog/unreleased_14/4390.rst deleted file mode 100644 index abbc664ee8e..00000000000 --- a/doc/build/changelog/unreleased_14/4390.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: deprecated, orm - :tickets: 4390 - - Deprecated an undocumented loader option syntax ``".*"``, which appears to - be no different than passing a single asterisk, and will emit a deprecation - warning if used. This syntax may have been intended for something but there - is currently no need for it. - diff --git a/doc/build/changelog/unreleased_14/7259.rst b/doc/build/changelog/unreleased_14/7259.rst deleted file mode 100644 index 477714edd9c..00000000000 --- a/doc/build/changelog/unreleased_14/7259.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. 
change:: - :tags: sql, usecase - :tickets: 7259 - - "Compound select" methods like :meth:`_sql.Select.union`, - :meth:`_sql.Select.intersect_all` etc. now accept ``*other`` as an argument - rather than ``other`` to allow for multiple additional SELECTs to be - compounded with the parent statement at once. In particular, the change as - applied to :meth:`_sql.CTE.union` and :meth:`_sql.CTE.union_all` now allow - for a so-called "non-linear CTE" to be created with the :class:`_sql.CTE` - construct, whereas previously there was no way to have more than two CTE - sub-elements in a UNION together while still correctly calling upon the CTE - in recursive fashion. Pull request courtesy Eric Masseran. diff --git a/doc/build/changelog/unreleased_14/7311.rst b/doc/build/changelog/unreleased_14/7311.rst deleted file mode 100644 index a04ec65d479..00000000000 --- a/doc/build/changelog/unreleased_14/7311.rst +++ /dev/null @@ -1,12 +0,0 @@ -.. change:: - :tags: bug, platform - :tickets: 7311 - - Python 3.10 has deprecated "distutils" in favor of explicit use of - "setuptools" in :pep:`632`; SQLAlchemy's setup.py has replaced imports - accordingly. However, since setuptools itself only recently added the - replacement symbols mentioned in pep-632 as of November of 2021 in version - 59.0.1, ``setup.py`` still has fallback imports to distutils, as SQLAlchemy - 1.4 does not have a hard setuptools versioning requirement at this time. - SQLAlchemy 2.0 is expected to use a full :pep:`517` installation layout - which will indicate appropriate setuptools versioning up front. diff --git a/doc/build/changelog/unreleased_14/7318.rst b/doc/build/changelog/unreleased_14/7318.rst deleted file mode 100644 index d60e303c9c7..00000000000 --- a/doc/build/changelog/unreleased_14/7318.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. 
change:: - :tags: bug, orm, regression - :tickets: 7318 - - Fixed ORM regression where the new behavior of "eager loaders run on - unexpire" added in :ticket:`1763` would lead to loader option errors being - raised inappropriately for the case where a single :class:`_orm.Query` or - :class:`_sql.Select` were used to load multiple kinds of entities, along - with loader options that apply to just one of those kinds of entity like a - :func:`_orm.joinedload`, and later the objects would be refreshed from - expiration, where the loader options would attempt to be applied to the - mismatched object type and then raise an exception. The check for this - mismatch now bypasses raising an error for this case. diff --git a/doc/build/changelog/unreleased_14/7319.rst b/doc/build/changelog/unreleased_14/7319.rst deleted file mode 100644 index 48da931a774..00000000000 --- a/doc/build/changelog/unreleased_14/7319.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. change:: - :tags: bug, sql, regression - :tickets: 7319 - - Extended the :attr:`.TypeDecorator.cache_ok` attribute and corresponding - warning message if this flag is not defined, a behavior first established - for :class:`.TypeDecorator` as part of :ticket:`6436`, to also take place - for :class:`.UserDefinedType`, by generalizing the flag and associated - caching logic to a new common base for these two types, - :class:`.ExternalType` to create :attr:`.UserDefinedType.cache_ok`. - - The change means any current :class:`.UserDefinedType` will now cause SQL - statement caching to no longer take place for statements which make use of - the datatype, along with a warning being emitted, unless the class defines - the :attr:`.UserDefinedType.cache_ok` flag as True. If the datatype cannot - form a deterministic, hashable cache key derived from its arguments, - the attribute may be set to False which will continue to keep caching disabled but will suppress the - warning. 
In particular, custom datatypes currently used in packages such as - SQLAlchemy-utils will need to implement this flag. The issue was observed - as a result of a SQLAlchemy-utils datatype that is not currently cacheable. - - .. seealso:: - - :attr:`.ExternalType.cache_ok` diff --git a/doc/build/changelog/unreleased_14/7321.rst b/doc/build/changelog/unreleased_14/7321.rst deleted file mode 100644 index 08cca434481..00000000000 --- a/doc/build/changelog/unreleased_14/7321.rst +++ /dev/null @@ -1,16 +0,0 @@ -.. change:: - :tags: bug, mypy - :tickets: 7321 - - Fixed Mypy crash which would occur when using Mypy plugin against code - which made use of :class:`_orm.declared_attr` methods for non-mapped names - like ``__mapper_args__``, ``__table_args__``, or other dunder names, as the - plugin would try to interpret these as mapped attributes which would then - be later mis-handled. As part of this change, the decorated function is - still converted by the plugin into a generic assignment statement (e.g. - ``__mapper_args__: Any``) so that the argument signature can continue to be - annotated in the same way one would for any other ``@classmethod`` without - Mypy complaining about the wrong argument type for a method that isn't - explicitly ``@classmethod``. - - diff --git a/doc/build/changelog/unreleased_14/7368.rst b/doc/build/changelog/unreleased_14/7368.rst deleted file mode 100644 index d4415ffec8f..00000000000 --- a/doc/build/changelog/unreleased_14/7368.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, orm, mypy - :tickets: 7368 - - Fixed issue where the :func:`_orm.as_declarative` decorator and similar - functions used to generate the declarative base class would not copy the - ``__class_getitem__()`` method from a given superclass, which prevented the - use of pep-484 generics in conjunction with the ``Base`` class. Pull - request courtesy Kai Mueller. 
diff --git a/doc/build/changelog/unreleased_14/7386.rst b/doc/build/changelog/unreleased_14/7386.rst deleted file mode 100644 index 2a902286883..00000000000 --- a/doc/build/changelog/unreleased_14/7386.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: usecase, sql - :tickets: 7386 - - Support multiple clause elements in the :meth:`_sql.Exists.where` method, - unifying the api with the one presented by a normal :func:`_sql.select` - construct. diff --git a/doc/build/changelog/unreleased_14/7388.rst b/doc/build/changelog/unreleased_14/7388.rst deleted file mode 100644 index 1c7775a3419..00000000000 --- a/doc/build/changelog/unreleased_14/7388.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 7388 - - Fixed issue where if an exception occurred when the :class:`_orm.Session` - were to close the connection within the :meth:`_orm.Session.commit` method, - when using a context manager for :meth:`_orm.Session.begin` , it would - attempt a rollback which would not be possible as the :class:`_orm.Session` - was in between where the transaction is committed and the connection is - then to be returned to the pool, raising the exception "this - sessiontransaction is in the committed state". This exception can occur - mostly in an asyncio context where CancelledError can be raised. - diff --git a/doc/build/changelog/unreleased_14/7389.rst b/doc/build/changelog/unreleased_14/7389.rst deleted file mode 100644 index 887193c2895..00000000000 --- a/doc/build/changelog/unreleased_14/7389.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: orm, bug - :tickets: 7389 - - Fixed issue where a list mapped with :func:`_orm.relationship` would go - into an endless loop if in-place added to itself, i.e. the ``+=`` operator - were used, as well as if ``.extend()`` were given the same list. 
- diff --git a/doc/build/changelog/unreleased_14/7394.rst b/doc/build/changelog/unreleased_14/7394.rst deleted file mode 100644 index 66bda3e4e4c..00000000000 --- a/doc/build/changelog/unreleased_14/7394.rst +++ /dev/null @@ -1,49 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 7394 - - Custom SQL elements, third party dialects, custom or third party datatypes - will all generate consistent warnings when they do not clearly opt in or - out of SQL statement caching, which is achieved by setting the appropriate - attributes on each type of class. The warning links to documentation - sections which indicate the appropriate approach for each type of object in - order for caching to be enabled. - -.. change:: - :tags: bug, sql - :tickets: 7394 - - Fixed missing caching directives for a few lesser used classes in SQL Core - which would cause ``[no key]`` to be logged for elements which made use of - these. - -.. change:: - :tags: bug, postgresql - :tickets: 7394 - - Fixed missing caching directives for :class:`_postgresql.hstore` and - :class:`_postgresql.array` constructs which would cause ``[no key]`` - to be logged for these elements. - -.. change:: - :tags: bug, orm - :tickets: 7394 - - User defined ORM options, such as those illustrated in the dogpile.caching - example which subclass :class:`_orm.UserDefinedOption`, by definition are - handled on every statement execution and do not need to be considered as - part of the cache key for the statement. Caching of the base - :class:`.ExecutableOption` class has been modified so that it is no longer - a :class:`.HasCacheKey` subclass directly, so that the presence of user - defined option objects will not have the unwanted side effect of disabling - statement caching. Only ORM specific loader and criteria options, which are - all internal to SQLAlchemy, now participate within the caching system. - -.. 
change:: - :tags: bug, orm - :tickets: 7394 - - Fixed issue where mappings that made use of :func:`_orm.synonym` and - potentially other kinds of "proxy" attributes would not in all cases - successfully generate a cache key for their SQL statements, leading to - degraded performance for those statements. \ No newline at end of file diff --git a/doc/build/changelog/unreleased_14/7400.rst b/doc/build/changelog/unreleased_14/7400.rst deleted file mode 100644 index 799b3b9a361..00000000000 --- a/doc/build/changelog/unreleased_14/7400.rst +++ /dev/null @@ -1,6 +0,0 @@ -.. change:: - :tags: usecase, engine - :tickets: 7400 - - Added support for ``copy()`` and ``deepcopy()`` to the :class:`_url.URL` - class. Pull request courtesy Tom Ritchford. diff --git a/doc/build/changelog/unreleased_14/7425.rst b/doc/build/changelog/unreleased_14/7425.rst deleted file mode 100644 index 24b48ef444f..00000000000 --- a/doc/build/changelog/unreleased_14/7425.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, orm, ext - :tickets: 7425 - - Fixed issue where the internal cloning used by the - :meth:`_orm.PropComparator.any` method on a :func:`_orm.relationship` in - the case where the related class also makes use of ORM polymorphic loading, - would fail if a hybrid property on the related, polymorphic class were used - within the criteria for the ``any()`` operation. diff --git a/doc/build/changelog/unreleased_14/pytest7.rst b/doc/build/changelog/unreleased_14/pytest7.rst deleted file mode 100644 index 4397626269b..00000000000 --- a/doc/build/changelog/unreleased_14/pytest7.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. change:: - :tags: bug, tests - - Implemented support for the test suite to run correctly under Pytest 7. - Previously, only Pytest 6.x was supported for Python 3, however the version - was not pinned on the upper bound in tox.ini. 
Pytest is not pinned in - tox.ini to be lower than version 8 so that SQLAlchemy versions released - with the current codebase will be able to be tested under tox without - changes to the environment. Much thanks to the Pytest developers for - their help with this issue. - diff --git a/doc/build/conf.py b/doc/build/conf.py index 1c4cdbfb306..28c04b449ec 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -204,9 +204,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. -release = "1.4.27" +release = "1.4.28" -release_date = "November 11, 2021" +release_date = "December 9, 2021" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From 93d569e4182b6e6ac25e3950f70031744eeca096 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 9 Dec 2021 14:50:36 -0500 Subject: [PATCH 054/632] Version 1.4.29 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index f5c590e8bda..006e0c585fc 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.29 + :include_notes_from: unreleased_14 + .. 
changelog:: :version: 1.4.28 :released: December 9, 2021 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index e962fc3b8fe..d249fbbcc14 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.28" +__version__ = "1.4.29" def __go(lcls): From aef7286fc0bf2bd3d56f03549df136c6503388a4 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Thu, 9 Dec 2021 21:47:22 +0100 Subject: [PATCH 055/632] Include import error message when greenlet is not installed Fixes: #7419 Change-Id: I0c604875a80287acff3bab732f67601a5e2db98c (cherry picked from commit 533f5718904b620be8d63f2474229945d6f8ba5d) --- lib/sqlalchemy/util/concurrency.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/lib/sqlalchemy/util/concurrency.py b/lib/sqlalchemy/util/concurrency.py index 37ecfdbc338..ebd845cebb2 100644 --- a/lib/sqlalchemy/util/concurrency.py +++ b/lib/sqlalchemy/util/concurrency.py @@ -8,12 +8,13 @@ from . import compat have_greenlet = False +greenlet_error = None if compat.py3k: try: import greenlet # noqa F401 - except ImportError: - pass + except ImportError as e: + greenlet_error = str(e) else: have_greenlet = True from ._concurrency_py3k import await_only @@ -45,6 +46,9 @@ def _not_implemented(): else: raise ValueError( "the greenlet library is required to use this function." + " %s" % greenlet_error + if greenlet_error + else "" ) def is_exit_exception(e): # noqa F811 From 3aa003de5b789e06d4364890f57db294e5951b77 Mon Sep 17 00:00:00 2001 From: Leo Singer Date: Thu, 7 Oct 2021 22:07:08 -0400 Subject: [PATCH 056/632] Update references to tables in PostgreSQL documentation Since the table numbers change from one version of PostgreSQL to the next, refer to the tables by name rather than by number. 
(cherry picked from commit 786bc099743285ca4af3786d8f54cd59ce7a7c81) --- lib/sqlalchemy/dialects/postgresql/ranges.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/sqlalchemy/dialects/postgresql/ranges.py b/lib/sqlalchemy/dialects/postgresql/ranges.py index c54179c8182..e7129aebb5b 100644 --- a/lib/sqlalchemy/dialects/postgresql/ranges.py +++ b/lib/sqlalchemy/dialects/postgresql/ranges.py @@ -13,15 +13,15 @@ class RangeOperators(object): """ This mixin provides functionality for the Range Operators - listed in Table 9-44 of the `PostgreSQL documentation`__ for Range - Functions and Operators. It is used by all the range types + listed in the Range Operators table of the `PostgreSQL documentation`__ + for Range Functions and Operators. It is used by all the range types provided in the ``postgres`` dialect and can likely be used for any range types you create yourself. __ https://www.postgresql.org/docs/devel/static/functions-range.html - No extra support is provided for the Range Functions listed in - Table 9-45 of the PostgreSQL documentation. For these, the normal + No extra support is provided for the Range Functions listed in the Range + Functions table of the PostgreSQL documentation. For these, the normal :func:`~sqlalchemy.sql.expression.func` object should be used. 
""" From fd7a0e60d0e9df81f44271a65c8364408e98235e Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Thu, 9 Dec 2021 22:11:37 +0100 Subject: [PATCH 057/632] Add execution options to ``Session.get`` Fixes: #7410 Change-Id: Iab6427b8b4c2ada8c31ef69f92d27c1185dbb6b1 (cherry picked from commit ec1fee363e7d538a2239818cfb1e341eddddcf36) --- doc/build/changelog/unreleased_14/7410.rst | 6 ++++++ lib/sqlalchemy/orm/session.py | 15 +++++++++++++++ test/orm/test_scoping.py | 1 + test/orm/test_session.py | 17 +++++++++++++++++ 4 files changed, 39 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7410.rst diff --git a/doc/build/changelog/unreleased_14/7410.rst b/doc/build/changelog/unreleased_14/7410.rst new file mode 100644 index 00000000000..7b4e8efbf11 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7410.rst @@ -0,0 +1,6 @@ +.. change:: + :tags: usecase, orm + :tickets: 7410 + + Added :paramref:`_orm.Session.get.execution_options` parameter which was + previously missing from the :meth:`_orm.Session.get` method. diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index 034651326be..5f1560a6974 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -2685,6 +2685,7 @@ def get( populate_existing=False, with_for_update=None, identity_token=None, + execution_options=None, ): """Return an instance based on the given primary key identifier, or ``None`` if not found. @@ -2765,6 +2766,19 @@ def get( :meth:`_query.Query.with_for_update`. Supersedes the :paramref:`.Session.refresh.lockmode` parameter. + :param execution_options: optional dictionary of execution options, + which will be associated with the query execution if one is emitted. + This dictionary can provide a subset of the options that are + accepted by :meth:`_engine.Connection.execution_options`, and may + also provide additional options understood only in an ORM context. + + .. versionadded:: 1.4.29 + + .. 
seealso:: + + :ref:`orm_queryguide_execution_options` - ORM-specific execution + options + :return: The object instance, or ``None``. """ @@ -2776,6 +2790,7 @@ def get( populate_existing=populate_existing, with_for_update=with_for_update, identity_token=identity_token, + execution_options=execution_options, ) def _get_impl( diff --git a/test/orm/test_scoping.py b/test/orm/test_scoping.py index 87f0a2aae89..e23f42ac53e 100644 --- a/test/orm/test_scoping.py +++ b/test/orm/test_scoping.py @@ -156,6 +156,7 @@ def test_methods_etc(self): populate_existing=False, with_for_update=None, identity_token=None, + execution_options=None, ), ], ) diff --git a/test/orm/test_session.py b/test/orm/test_session.py index 4ee71fd5ba5..62974b62919 100644 --- a/test/orm/test_session.py +++ b/test/orm/test_session.py @@ -461,6 +461,23 @@ def test_make_transient_to_detached_no_key_allowed(self): u1, ) + def test_get_execution_option(self): + users, User = self.tables.users, self.classes.User + + self.mapper_registry.map_imperatively(User, users) + sess = fixture_session() + called = [False] + + @event.listens_for(sess, "do_orm_execute") + def check(ctx): + called[0] = True + eq_(ctx.execution_options["foo"], "bar") + + sess.get(User, 42, execution_options={"foo": "bar"}) + sess.close() + + is_true(called[0]) + class SessionStateTest(_fixtures.FixtureTest): run_inserts = None From fe9906b5f367972e3f3903c229ac0f3603c7318a Mon Sep 17 00:00:00 2001 From: Nils Philippsen Date: Sat, 13 Nov 2021 11:11:32 -0500 Subject: [PATCH 058/632] Add async_engine_from_config() Added :func:`_asyncio.async_engine_config` function to create an async engine from a configuration dict. This otherwise behaves the same as :func:`_sa.engine_from_config`. 
Fixes: #7301 Closes: #7302 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/7302 Pull-request-sha: c7c758833b6c37b7509b8c5bed4f26ac0ccc0395 Change-Id: I64feadf95b5015c24fe0fa0dbae6755b72d1713e (cherry picked from commit 98b1650efce79ae6d304eb08047890ef8ae89df1) --- doc/build/changelog/unreleased_14/7301.rst | 7 +++++++ doc/build/orm/extensions/asyncio.rst | 2 ++ lib/sqlalchemy/ext/asyncio/__init__.py | 1 + lib/sqlalchemy/ext/asyncio/engine.py | 23 ++++++++++++++++++++++ test/ext/asyncio/test_engine_py3k.py | 11 +++++++++++ 5 files changed, 44 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7301.rst diff --git a/doc/build/changelog/unreleased_14/7301.rst b/doc/build/changelog/unreleased_14/7301.rst new file mode 100644 index 00000000000..a8a71afacec --- /dev/null +++ b/doc/build/changelog/unreleased_14/7301.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: usecase, asyncio + :tickets: 7425 + + Added :func:`_asyncio.async_engine_config` function to create + an async engine from a configuration dict. This otherwise + behaves the same as :func:`_sa.engine_from_config`. diff --git a/doc/build/orm/extensions/asyncio.rst b/doc/build/orm/extensions/asyncio.rst index 91c7c53e1e9..a7d2fb16be7 100644 --- a/doc/build/orm/extensions/asyncio.rst +++ b/doc/build/orm/extensions/asyncio.rst @@ -732,6 +732,8 @@ Engine API Documentation .. autofunction:: create_async_engine +.. autofunction:: async_engine_from_config + .. 
autoclass:: AsyncEngine :members: diff --git a/lib/sqlalchemy/ext/asyncio/__init__.py b/lib/sqlalchemy/ext/asyncio/__init__.py index ac3b905c615..03103971375 100644 --- a/lib/sqlalchemy/ext/asyncio/__init__.py +++ b/lib/sqlalchemy/ext/asyncio/__init__.py @@ -5,6 +5,7 @@ # This module is part of SQLAlchemy and is released under # the MIT License: https://www.opensource.org/licenses/mit-license.php +from .engine import async_engine_from_config from .engine import AsyncConnection from .engine import AsyncEngine from .engine import AsyncTransaction diff --git a/lib/sqlalchemy/ext/asyncio/engine.py b/lib/sqlalchemy/ext/asyncio/engine.py index bfaaea4d92e..0b212830e60 100644 --- a/lib/sqlalchemy/ext/asyncio/engine.py +++ b/lib/sqlalchemy/ext/asyncio/engine.py @@ -41,6 +41,29 @@ def create_async_engine(*arg, **kw): return AsyncEngine(sync_engine) +def async_engine_from_config(configuration, prefix="sqlalchemy.", **kwargs): + """Create a new AsyncEngine instance using a configuration dictionary. + + This function is analogous to the :func:`_sa.engine_from_config` function + in SQLAlchemy Core, except that the requested dialect must be an + asyncio-compatible dialect such as :ref:`dialect-postgresql-asyncpg`. + The argument signature of the function is identical to that + of :func:`_sa.engine_from_config`. + + .. 
versionadded:: 1.4.29 + + """ + options = { + key[len(prefix) :]: value + for key, value in configuration.items() + if key.startswith(prefix) + } + options["_coerce_config"] = True + options.update(kwargs) + url = options.pop("url") + return create_async_engine(url, **options) + + class AsyncConnectable: __slots__ = "_slots_dispatch", "__weakref__" diff --git a/test/ext/asyncio/test_engine_py3k.py b/test/ext/asyncio/test_engine_py3k.py index 3c260f9e5d9..bd07bba0db6 100644 --- a/test/ext/asyncio/test_engine_py3k.py +++ b/test/ext/asyncio/test_engine_py3k.py @@ -14,6 +14,7 @@ from sqlalchemy import testing from sqlalchemy import text from sqlalchemy import union_all +from sqlalchemy.ext.asyncio import async_engine_from_config from sqlalchemy.ext.asyncio import create_async_engine from sqlalchemy.ext.asyncio import engine as _async_engine from sqlalchemy.ext.asyncio import exc as asyncio_exc @@ -591,6 +592,16 @@ async def test_create_async_engine_server_side_cursor(self, async_engine): server_side_cursors=True, ) + def test_async_engine_from_config(self): + config = { + "sqlalchemy.url": str(testing.db.url), + "sqlalchemy.echo": "true", + } + engine = async_engine_from_config(config) + assert engine.url == testing.db.url + assert engine.echo is True + assert engine.dialect.is_async is True + class AsyncEventTest(EngineFixture): """The engine events all run in their normal synchronous context. From aaa9b040cef9f040f53ff447b9a7a8e815e75673 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Thu, 9 Dec 2021 22:41:47 +0100 Subject: [PATCH 059/632] Add ``scalars`` to Migration - ORM Usage table. 
Fixes #7407 Change-Id: I0ec7c0dd44dce3b907296824ee4e6103bc72a6dd (cherry picked from commit d692b11608f540e4d4df8f4127ee52a476e0e5e4) --- doc/build/changelog/migration_20.rst | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/doc/build/changelog/migration_20.rst b/doc/build/changelog/migration_20.rst index 8f35220d890..86c8b1a69b3 100644 --- a/doc/build/changelog/migration_20.rst +++ b/doc/build/changelog/migration_20.rst @@ -1206,9 +1206,12 @@ following the table, and may include additional notes not summarized here. session.execute( select(User) ).scalars().all() + # or + session.scalars(select(User)).all() - :ref:`migration_20_unify_select` + :meth:`_orm.Session.scalars` :meth:`_engine.Result.scalars` * - :: @@ -1235,11 +1238,11 @@ following the table, and may include additional notes not summarized here. - :: - session.execute( + session.scalars( select(User). filter_by(name="some user"). limit(1) - ).scalars().first() + ).first() - :ref:`migration_20_unify_select` @@ -1253,7 +1256,7 @@ following the table, and may include additional notes not summarized here. - :: - session.execute( + session.scalars( select(User). options( joinedload(User.addresses) @@ -1289,12 +1292,12 @@ following the table, and may include additional notes not summarized here. - :: - session.execute( + session.scalars( select(User). 
from_statement( text("select * from users") ) - ).scalars().all() + ).all() - :ref:`orm_queryguide_selecting_text` From 05a19294d81bff53b81d8524a60b580f401bcb45 Mon Sep 17 00:00:00 2001 From: Nils Philippsen Date: Sat, 11 Dec 2021 17:24:22 +0100 Subject: [PATCH 060/632] Fix referenced ticket number (#7439) Signed-off-by: Nils Philippsen --- doc/build/changelog/unreleased_14/7301.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/changelog/unreleased_14/7301.rst b/doc/build/changelog/unreleased_14/7301.rst index a8a71afacec..160e06c8383 100644 --- a/doc/build/changelog/unreleased_14/7301.rst +++ b/doc/build/changelog/unreleased_14/7301.rst @@ -1,6 +1,6 @@ .. change:: :tags: usecase, asyncio - :tickets: 7425 + :tickets: 7301 Added :func:`_asyncio.async_engine_config` function to create an async engine from a configuration dict. This otherwise From 8dafa1b19c0003b92eb9b81dd33e2989ae5e2ec2 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 12 Dec 2021 11:23:07 -0500 Subject: [PATCH 061/632] check for string_types, not str, for py2 support Fixed regression in the :func:`_engine.make_url` function used to parse URL strings where the query string parsing would go into a recursion overflow if a Python 2 ``u''`` string were used. Fixes: #7446 Change-Id: I081275673e6240a52f71da7dfaaf04e6fe32cf48 --- doc/build/changelog/unreleased_14/7446.rst | 7 +++++++ lib/sqlalchemy/engine/url.py | 2 +- test/engine/test_parseconnect.py | 12 ++++++++++++ 3 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/7446.rst diff --git a/doc/build/changelog/unreleased_14/7446.rst b/doc/build/changelog/unreleased_14/7446.rst new file mode 100644 index 00000000000..d92eb13a8db --- /dev/null +++ b/doc/build/changelog/unreleased_14/7446.rst @@ -0,0 +1,7 @@ +.. 
change:: + :tags: bug, engine, regression + :tickets: 7446 + + Fixed regression in the :func:`_engine.make_url` function used to parse URL + strings where the query string parsing would go into a recursion overflow + if a Python 2 ``u''`` string were used. diff --git a/lib/sqlalchemy/engine/url.py b/lib/sqlalchemy/engine/url.py index 320e69fbc38..a73b81a319b 100644 --- a/lib/sqlalchemy/engine/url.py +++ b/lib/sqlalchemy/engine/url.py @@ -182,7 +182,7 @@ def _str_dict(cls, dict_): return util.EMPTY_DICT def _assert_value(val): - if isinstance(val, str): + if isinstance(val, compat.string_types): return val elif isinstance(val, collections_abc.Sequence): return tuple(_assert_value(elem) for elem in val) diff --git a/test/engine/test_parseconnect.py b/test/engine/test_parseconnect.py index f553b1dab56..28362ba2a1c 100644 --- a/test/engine/test_parseconnect.py +++ b/test/engine/test_parseconnect.py @@ -6,6 +6,7 @@ from sqlalchemy import exc from sqlalchemy import pool from sqlalchemy import testing +from sqlalchemy import util from sqlalchemy.dialects import plugins from sqlalchemy.dialects import registry from sqlalchemy.engine.default import DefaultDialect @@ -181,6 +182,17 @@ def test_query_string(self): eq_(u.query, {"arg1=": "param1", "arg2": "param 2"}) eq_(str(u), test_url) + def test_query_string_py2_unicode(self): + url_str = u"dialect://user:pass@host/?arg1=param1&arg2=param2" + if util.py2k: + # just to make sure linters / formatters etc. 
don't erase the + # 'u' above + assert isinstance(url_str, unicode) # noqa + u = url.make_url(url_str) + eq_(u.query, {"arg1": "param1", "arg2": "param2"}) + eq_(u.database, "") + eq_(str(u), "dialect://user:pass@host/?arg1=param1&arg2=param2") + def test_comparison(self): common_url = ( "dbtype://username:password" From 60404ad39d63840a4a52927c2d878933d0a26830 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 12 Dec 2021 13:37:21 -0500 Subject: [PATCH 062/632] use the options from the cached statement for propagate_options Fixed caching-related issue where the use of a loader option of the form ``lazyload(aliased(A).bs).joinedload(B.cs)`` would fail to result in the joinedload being invoked for runs subsequent to the query being cached, due to a mismatch for the options / object path applied to the objects loaded for a query with a lead entity that used ``aliased()``. Fixes: #7447 Change-Id: I4e9c34654b7d3668cd8878decbd688afe2af5f81 (cherry picked from commit 04421c8bed9e93a625b7164e99eb1ee0395bebfe) --- doc/build/changelog/unreleased_14/7447.rst | 10 + lib/sqlalchemy/orm/context.py | 22 +- test/orm/test_eager_relations.py | 252 ++++++++++++--------- 3 files changed, 170 insertions(+), 114 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7447.rst diff --git a/doc/build/changelog/unreleased_14/7447.rst b/doc/build/changelog/unreleased_14/7447.rst new file mode 100644 index 00000000000..3f954faba4e --- /dev/null +++ b/doc/build/changelog/unreleased_14/7447.rst @@ -0,0 +1,10 @@ +.. change:: + :tags: bug, orm, regression + :tickets: 7447 + + Fixed caching-related issue where the use of a loader option of the form + ``lazyload(aliased(A).bs).joinedload(B.cs)`` would fail to result in the + joinedload being invoked for runs subsequent to the query being cached, due + to a mismatch for the options / object path applied to the objects loaded + for a query with a lead entity that used ``aliased()``. 
+ diff --git a/lib/sqlalchemy/orm/context.py b/lib/sqlalchemy/orm/context.py index 0a93d993af4..14130e40c4e 100644 --- a/lib/sqlalchemy/orm/context.py +++ b/lib/sqlalchemy/orm/context.py @@ -106,7 +106,27 @@ def __init__( self.params = params self.propagated_loader_options = { - o for o in statement._with_options if o.propagate_to_loaders + # issue 7447. + # propagated loader options will be present on loaded InstanceState + # objects under state.load_options and are typically used by + # LazyLoader to apply options to the SELECT statement it emits. + # For compile state options (i.e. loader strategy options), these + # need to line up with the ".load_path" attribute which in + # loader.py is pulled from context.compile_state.current_path. + # so, this means these options have to be the ones from the + # *cached* statement that's travelling with compile_state, not the + # *current* statement which won't match up for an ad-hoc + # AliasedClass + cached_o + for cached_o in compile_state.select_statement._with_options + if cached_o.propagate_to_loaders and cached_o._is_compile_state + } | { + # for user defined loader options that are not "compile state", + # those just need to be present as they are + uncached_o + for uncached_o in statement._with_options + if uncached_o.propagate_to_loaders + and not uncached_o._is_compile_state } self.attributes = dict(compile_state.attributes) diff --git a/test/orm/test_eager_relations.py b/test/orm/test_eager_relations.py index 32abc3b31c4..2ab2bba5c5a 100644 --- a/test/orm/test_eager_relations.py +++ b/test/orm/test_eager_relations.py @@ -6068,153 +6068,179 @@ def go(): def test_lazyload_aliased_abs_bcs_one(self): A, B, C = self.classes("A", "B", "C") - s = fixture_session() - aa = aliased(A) - q = ( - s.query(aa, A) - .filter(aa.id == 1) - .filter(A.id == 2) - .filter(aa.id != A.id) - .options(joinedload(A.bs).joinedload(B.cs)) - ) - self._run_tests(q, 3) + + for i in range(2): + s = fixture_session() + aa = aliased(A) + q = ( 
+ s.query(aa, A) + .filter(aa.id == 1) + .filter(A.id == 2) + .filter(aa.id != A.id) + .options(joinedload(A.bs).joinedload(B.cs)) + ) + self._run_tests(q, 3) def test_lazyload_aliased_abs_bcs_two(self): A, B, C = self.classes("A", "B", "C") - s = fixture_session() - aa = aliased(A) - q = ( - s.query(aa, A) - .filter(aa.id == 1) - .filter(A.id == 2) - .filter(aa.id != A.id) - .options(defaultload(A.bs).joinedload(B.cs)) - ) - self._run_tests(q, 3) + + for i in range(2): + s = fixture_session() + aa = aliased(A) + q = ( + s.query(aa, A) + .filter(aa.id == 1) + .filter(A.id == 2) + .filter(aa.id != A.id) + .options(defaultload(A.bs).joinedload(B.cs)) + ) + self._run_tests(q, 3) def test_pathed_lazyload_aliased_abs_bcs(self): A, B, C = self.classes("A", "B", "C") - s = fixture_session() - aa = aliased(A) - opt = Load(A).joinedload(A.bs).joinedload(B.cs) - q = ( - s.query(aa, A) - .filter(aa.id == 1) - .filter(A.id == 2) - .filter(aa.id != A.id) - .options(opt) - ) - self._run_tests(q, 3) + for i in range(2): + s = fixture_session() + aa = aliased(A) + opt = Load(A).joinedload(A.bs).joinedload(B.cs) + + q = ( + s.query(aa, A) + .filter(aa.id == 1) + .filter(A.id == 2) + .filter(aa.id != A.id) + .options(opt) + ) + self._run_tests(q, 3) def test_pathed_lazyload_plus_joined_aliased_abs_bcs(self): A, B, C = self.classes("A", "B", "C") - s = fixture_session() - aa = aliased(A) - opt = Load(aa).defaultload(aa.bs).joinedload(B.cs) - q = ( - s.query(aa, A) - .filter(aa.id == 1) - .filter(A.id == 2) - .filter(aa.id != A.id) - .options(opt) - ) - self._run_tests(q, 2) + for i in range(2): + s = fixture_session() + aa = aliased(A) + opt = Load(aa).defaultload(aa.bs).joinedload(B.cs) + + q = ( + s.query(aa, A) + .filter(aa.id == 1) + .filter(A.id == 2) + .filter(aa.id != A.id) + .options(opt) + ) + self._run_tests(q, 2) def test_pathed_joinedload_aliased_abs_bcs(self): A, B, C = self.classes("A", "B", "C") - s = fixture_session() - aa = aliased(A) - opt = 
Load(aa).joinedload(aa.bs).joinedload(B.cs) - q = ( - s.query(aa, A) - .filter(aa.id == 1) - .filter(A.id == 2) - .filter(aa.id != A.id) - .options(opt) - ) - self._run_tests(q, 1) + for i in range(2): + s = fixture_session() + aa = aliased(A) + opt = Load(aa).joinedload(aa.bs).joinedload(B.cs) + + q = ( + s.query(aa, A) + .filter(aa.id == 1) + .filter(A.id == 2) + .filter(aa.id != A.id) + .options(opt) + ) + self._run_tests(q, 1) def test_lazyload_plus_joined_aliased_abs_bcs(self): + """by running the test twice, this test includes a test + for #7447 to ensure cached queries apply the cached option objects + to the InstanceState which line up with the cached current_path.""" + A, B, C = self.classes("A", "B", "C") - s = fixture_session() - aa = aliased(A) - q = ( - s.query(aa, A) - .filter(aa.id == 1) - .filter(A.id == 2) - .filter(aa.id != A.id) - .options(defaultload(aa.bs).joinedload(B.cs)) - ) - self._run_tests(q, 2) + for i in range(2): + s = fixture_session() + aa = aliased(A) + q = ( + s.query(aa, A) + .filter(aa.id == 1) + .filter(A.id == 2) + .filter(aa.id != A.id) + .options(defaultload(aa.bs).joinedload(B.cs)) + ) + + self._run_tests(q, 2) def test_joinedload_aliased_abs_bcs(self): A, B, C = self.classes("A", "B", "C") - s = fixture_session() - aa = aliased(A) - q = ( - s.query(aa, A) - .filter(aa.id == 1) - .filter(A.id == 2) - .filter(aa.id != A.id) - .options(joinedload(aa.bs).joinedload(B.cs)) - ) - self._run_tests(q, 1) + + for i in range(2): + s = fixture_session() + aa = aliased(A) + q = ( + s.query(aa, A) + .filter(aa.id == 1) + .filter(A.id == 2) + .filter(aa.id != A.id) + .options(joinedload(aa.bs).joinedload(B.cs)) + ) + self._run_tests(q, 1) def test_lazyload_unaliased_abs_bcs_one(self): A, B, C = self.classes("A", "B", "C") - s = fixture_session() - aa = aliased(A) - q = ( - s.query(A, aa) - .filter(aa.id == 2) - .filter(A.id == 1) - .filter(aa.id != A.id) - .options(joinedload(aa.bs).joinedload(B.cs)) - ) - self._run_tests(q, 3) + + for i 
in range(2): + s = fixture_session() + aa = aliased(A) + q = ( + s.query(A, aa) + .filter(aa.id == 2) + .filter(A.id == 1) + .filter(aa.id != A.id) + .options(joinedload(aa.bs).joinedload(B.cs)) + ) + self._run_tests(q, 3) def test_lazyload_unaliased_abs_bcs_two(self): A, B, C = self.classes("A", "B", "C") - s = fixture_session() - aa = aliased(A) - q = ( - s.query(A, aa) - .filter(aa.id == 2) - .filter(A.id == 1) - .filter(aa.id != A.id) - .options(defaultload(aa.bs).joinedload(B.cs)) - ) - self._run_tests(q, 3) + + for i in range(2): + s = fixture_session() + aa = aliased(A) + q = ( + s.query(A, aa) + .filter(aa.id == 2) + .filter(A.id == 1) + .filter(aa.id != A.id) + .options(defaultload(aa.bs).joinedload(B.cs)) + ) + self._run_tests(q, 3) def test_lazyload_plus_joined_unaliased_abs_bcs(self): A, B, C = self.classes("A", "B", "C") - s = fixture_session() - aa = aliased(A) - q = ( - s.query(A, aa) - .filter(aa.id == 2) - .filter(A.id == 1) - .filter(aa.id != A.id) - .options(defaultload(A.bs).joinedload(B.cs)) - ) - self._run_tests(q, 2) + + for i in range(2): + s = fixture_session() + aa = aliased(A) + q = ( + s.query(A, aa) + .filter(aa.id == 2) + .filter(A.id == 1) + .filter(aa.id != A.id) + .options(defaultload(A.bs).joinedload(B.cs)) + ) + self._run_tests(q, 2) def test_joinedload_unaliased_abs_bcs(self): A, B, C = self.classes("A", "B", "C") - s = fixture_session() - aa = aliased(A) - q = ( - s.query(A, aa) - .filter(aa.id == 2) - .filter(A.id == 1) - .filter(aa.id != A.id) - .options(joinedload(A.bs).joinedload(B.cs)) - ) - self._run_tests(q, 1) + + for i in range(2): + s = fixture_session() + aa = aliased(A) + q = ( + s.query(A, aa) + .filter(aa.id == 2) + .filter(A.id == 1) + .filter(aa.id != A.id) + .options(joinedload(A.bs).joinedload(B.cs)) + ) + self._run_tests(q, 1) class EntityViaMultiplePathTestThree(fixtures.DeclarativeMappedTest): From 4f0bf86eec0ea557b2df31cce28612d5f124a8a3 Mon Sep 17 00:00:00 2001 From: Nils Philippsen Date: Sun, 12 Dec 2021 
18:35:03 -0500 Subject: [PATCH 063/632] Ignore ephemeral classes in test_all_present() Fixed a regression in the test suite where the test called ``CompareAndCopyTest::test_all_present`` would fail on some platforms due to additional testing artifacts being detected. Pull request courtesy Nils Philippsen. In some circumstances, ephemeral class objects that are created within the scope of a test method don't seem to be garbage collected directly on exit. Filter out classes created in test modules. Fixes: #7450 Closes: #7451 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/7451 Pull-request-sha: 135a8aaba2c6941460c7f45aa1a55c8f6b9eb43d Change-Id: I621967bd916089dc1e3f98625fd2a852cd9fd712 (cherry picked from commit 4291e3c235569031948ebaacec4dde8776255e0e) --- doc/build/changelog/unreleased_14/7450.rst | 9 +++++++++ test/sql/test_compare.py | 6 +++++- 2 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/7450.rst diff --git a/doc/build/changelog/unreleased_14/7450.rst b/doc/build/changelog/unreleased_14/7450.rst new file mode 100644 index 00000000000..56aaa1d4f78 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7450.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, tests, regression + :tickets: 7450 + + Fixed a regression in the test suite where the test called + ``CompareAndCopyTest::test_all_present`` would fail on some platforms due + to additional testing artifacts being detected. Pull request courtesy Nils + Philippsen. + diff --git a/test/sql/test_compare.py b/test/sql/test_compare.py index a5252601c42..a4684cccffa 100644 --- a/test/sql/test_compare.py +++ b/test/sql/test_compare.py @@ -1357,6 +1357,10 @@ def setup_test_class(cls): ] def test_all_present(self): + """test for elements that are in SQLAlchemy Core, that they are + also included in the fixtures above. 
+ + """ need = set( cls for cls in class_hierarchy(ClauseElement) @@ -1364,9 +1368,9 @@ def test_all_present(self): and ( "__init__" in cls.__dict__ or issubclass(cls, AliasedReturnsRows) - or "inherit_cache" not in cls.__dict__ ) and not issubclass(cls, (Annotated)) + and cls.__module__.startswith("sqlalchemy.") and "orm" not in cls.__module__ and "compiler" not in cls.__module__ and "crud" not in cls.__module__ From 52ae2230d46414b6c270b9c83c6004ab15fe01d9 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 14 Dec 2021 16:46:50 -0500 Subject: [PATCH 064/632] include InterfaceError for mariadb disconnect check Corrected the error classes inspected for the "is_disconnect" check for the ``mariadbconnector`` dialect, which was failing for disconnects that occurred due to common MySQL/MariaDB error codes such as 2006; the DBAPI appears to currently use the ``mariadb.InterfaceError`` exception class for disconnect errors such as error code 2006, which has been added to the list of classes checked. For the current "real reconnect test", shutting down the mariadb connection from the client side produces ProgrammingError("Connection isn't valid anymore") which we also continue to intercept. Fixes: #7457 Change-Id: I0b37cd7a73359a23ad756ff2af0a9333c841221b (cherry picked from commit 3a33fa8b0acd9220ef4428ac6a56ebd8d40c3762) --- doc/build/changelog/unreleased_14/7457.rst | 11 +++++++++++ lib/sqlalchemy/dialects/mysql/base.py | 23 +++++++++++++--------- 2 files changed, 25 insertions(+), 9 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7457.rst diff --git a/doc/build/changelog/unreleased_14/7457.rst b/doc/build/changelog/unreleased_14/7457.rst new file mode 100644 index 00000000000..b1942b0eae2 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7457.rst @@ -0,0 +1,11 @@ +.. 
change:: + :tags: bug, mariadb + :tickets: 7457 + + Corrected the error classes inspected for the "is_disconnect" check for the + ``mariadbconnector`` dialect, which was failing for disconnects that + occurred due to common MySQL/MariaDB error codes such as 2006; the DBAPI + appears to currently use the ``mariadb.InterfaceError`` exception class for + disconnect errors such as error code 2006, which has been added to the list + of classes checked. + diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index a9e3d0de7d9..91356500f7f 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -2578,16 +2578,21 @@ def do_recover_twophase(self, connection): def is_disconnect(self, e, connection, cursor): if isinstance( - e, (self.dbapi.OperationalError, self.dbapi.ProgrammingError) + e, + ( + self.dbapi.OperationalError, + self.dbapi.ProgrammingError, + self.dbapi.InterfaceError, + ), + ) and self._extract_error_code(e) in ( + 1927, + 2006, + 2013, + 2014, + 2045, + 2055, ): - return self._extract_error_code(e) in ( - 1927, - 2006, - 2013, - 2014, - 2045, - 2055, - ) + return True elif isinstance( e, (self.dbapi.InterfaceError, self.dbapi.InternalError) ): From a0cfc934dc1286f24f3e769b305cf2faaa818302 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 21 Dec 2021 18:08:33 -0500 Subject: [PATCH 065/632] accommodate for "clone" of ColumnClause for use with the ClauseElement.params() method, altered ColumnClause._clone() so that while the element stays immutable, if the column is associated with a subquery, it returns a new version of itself as corresponding to a clone of the subquery. this allows processing functions to access the parameters in the subquery and produce a copy of it. The use case here is the expanded use of .params() within loader strategies that use HasCacheKey._apply_params_to_element(). 
Fixed issue in new "loader criteria" method :meth:`_orm.PropComparator.and_` where usage with a loader strategy like :func:`_orm.selectinload` against a column that was a member of the ``.c.`` collection of a subquery object, where the subquery would be dynamically added to the FROM clause of the statement, would be subject to stale parameter values within the subquery in the SQL statement cache, as the process used by the loader strategy to replace the parameters at execution time would fail to accommodate the subquery when received in this form. Fixes: #7489 Change-Id: Ibb3b6af140b8a62a2c8d05b2ac92e86ca3013c46 (cherry picked from commit 267e9cbf6e3c165a4e953b49d979d7f4ddc533f9) --- doc/build/changelog/unreleased_14/7489.rst | 13 +++++ lib/sqlalchemy/sql/elements.py | 18 ++++++- lib/sqlalchemy/sql/traversals.py | 1 - lib/sqlalchemy/sql/visitors.py | 2 +- test/orm/test_relationship_criteria.py | 60 ++++++++++++++++++++++ test/sql/test_external_traversal.py | 42 +++++++++++++++ 6 files changed, 133 insertions(+), 3 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7489.rst diff --git a/doc/build/changelog/unreleased_14/7489.rst b/doc/build/changelog/unreleased_14/7489.rst new file mode 100644 index 00000000000..4af33f42985 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7489.rst @@ -0,0 +1,13 @@ +.. change:: + :tags: bug, orm + :tickets: 7489 + + Fixed issue in new "loader criteria" method + :meth:`_orm.PropComparator.and_` where usage with a loader strategy like + :func:`_orm.selectinload` against a column that was a member of the ``.c.`` + collection of a subquery object, where the subquery would be dynamically + added to the FROM clause of the statement, would be subject to stale + parameter values within the subquery in the SQL statement cache, as the + process used by the loader strategy to replace the parameters at execution + time would fail to accommodate the subquery when received in this form. 
+ diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index 08eb37f2ce7..48b64545319 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -358,6 +358,7 @@ def params(self, *optionaldict, **kwargs): return self._replace_params(False, optionaldict, kwargs) def _replace_params(self, unique, optionaldict, kwargs): + if len(optionaldict) == 1: kwargs.update(optionaldict[0]) elif len(optionaldict) > 1: @@ -373,7 +374,9 @@ def visit_bindparam(bind): bind._convert_to_unique() return cloned_traverse( - self, {"maintain_key": True}, {"bindparam": visit_bindparam} + self, + {"maintain_key": True, "detect_subquery_cols": True}, + {"bindparam": visit_bindparam}, ) def compare(self, other, **kw): @@ -4880,6 +4883,19 @@ def entity_namespace(self): else: return super(ColumnClause, self).entity_namespace + def _clone(self, detect_subquery_cols=False, **kw): + if ( + detect_subquery_cols + and self.table is not None + and self.table._is_subquery + ): + clone = kw.pop("clone") + table = clone(self.table, **kw) + new = table.c.corresponding_column(self) + return new + + return super(ColumnClause, self)._clone(**kw) + @HasMemoized.memoized_attribute def _from_objects(self): t = self.table diff --git a/lib/sqlalchemy/sql/traversals.py b/lib/sqlalchemy/sql/traversals.py index 27e65652654..9da61ab28cb 100644 --- a/lib/sqlalchemy/sql/traversals.py +++ b/lib/sqlalchemy/sql/traversals.py @@ -271,7 +271,6 @@ def _gen_cache_key(self, anon_map, bindparams): result += meth( attrname, obj, self, anon_map, bindparams ) - return result def _generate_cache_key(self): diff --git a/lib/sqlalchemy/sql/visitors.py b/lib/sqlalchemy/sql/visitors.py index 7111c5efd70..3636be4be61 100644 --- a/lib/sqlalchemy/sql/visitors.py +++ b/lib/sqlalchemy/sql/visitors.py @@ -772,7 +772,7 @@ def clone(elem, **kw): cloned[id(elem)] = newelem return newelem - cloned[id(elem)] = newelem = elem._clone(**kw) + cloned[id(elem)] = newelem = elem._clone(clone=clone, **kw) 
newelem._copy_internals(clone=clone, **kw) meth = visitors.get(newelem.__visit_name__, None) if meth: diff --git a/test/orm/test_relationship_criteria.py b/test/orm/test_relationship_criteria.py index 7e2c6e04f9f..00e84dc8d87 100644 --- a/test/orm/test_relationship_criteria.py +++ b/test/orm/test_relationship_criteria.py @@ -1240,6 +1240,66 @@ def go(value): ), ) + def test_selectinload_local_criteria_subquery(self, user_address_fixture): + """test #7489""" + User, Address = user_address_fixture + + s = Session(testing.db, future=True) + + def go(value): + a1 = aliased(Address) + subq = select(a1.id).where(a1.email_address != value).subquery() + stmt = ( + select(User) + .options( + selectinload(User.addresses.and_(Address.id == subq.c.id)), + ) + .order_by(User.id) + ) + result = s.execute(stmt) + return result + + for value in ( + "ed@wood.com", + "ed@lala.com", + "ed@wood.com", + "ed@lala.com", + ): + s.close() + with self.sql_execution_asserter() as asserter: + result = go(value) + + eq_( + result.scalars().unique().all(), + self._user_minus_edwood(*user_address_fixture) + if value == "ed@wood.com" + else self._user_minus_edlala(*user_address_fixture), + ) + + asserter.assert_( + CompiledSQL( + "SELECT users.id, users.name FROM users ORDER BY users.id" + ), + CompiledSQL( + "SELECT addresses.user_id AS addresses_user_id, " + "addresses.id AS addresses_id, " + "addresses.email_address AS addresses_email_address " + # note the comma-separated FROM clause + "FROM addresses, (SELECT addresses_1.id AS id FROM " + "addresses AS addresses_1 " + "WHERE addresses_1.email_address != :email_address_1) " + "AS anon_1 WHERE addresses.user_id " + "IN (__[POSTCOMPILE_primary_keys]) " + "AND addresses.id = anon_1.id ORDER BY addresses.id", + [ + { + "primary_keys": [7, 8, 9, 10], + "email_address_1": value, + } + ], + ), + ) + @testing.combinations((True,), (False,), argnames="use_compiled_cache") def test_selectinload_nested_criteria( self, user_order_item_fixture, 
use_compiled_cache diff --git a/test/sql/test_external_traversal.py b/test/sql/test_external_traversal.py index e01ec0738e9..c14b8b4c68b 100644 --- a/test/sql/test_external_traversal.py +++ b/test/sql/test_external_traversal.py @@ -827,6 +827,48 @@ def test_params_elements_in_setup_joins(self): sel._generate_cache_key()[1], ) + def test_params_on_expr_against_subquery(self): + """test #7489""" + + meta = MetaData() + + b = Table("b", meta, Column("id", Integer), Column("data", String)) + + subq = select(b.c.id).where(b.c.data == "some data").subquery() + criteria = b.c.id == subq.c.id + + stmt = select(b).where(criteria) + param_key = stmt._generate_cache_key()[1][0].key + + self.assert_compile( + stmt, + "SELECT b.id, b.data FROM b, (SELECT b.id AS id " + "FROM b WHERE b.data = :data_1) AS anon_1 WHERE b.id = anon_1.id", + checkparams={"data_1": "some data"}, + ) + eq_( + [ + eq_clause_element(bindparam(param_key, value="some data")), + ], + stmt._generate_cache_key()[1], + ) + + stmt = select(b).where(criteria.params({param_key: "some other data"})) + self.assert_compile( + stmt, + "SELECT b.id, b.data FROM b, (SELECT b.id AS id " + "FROM b WHERE b.data = :data_1) AS anon_1 WHERE b.id = anon_1.id", + checkparams={"data_1": "some other data"}, + ) + eq_( + [ + eq_clause_element( + bindparam(param_key, value="some other data") + ), + ], + stmt._generate_cache_key()[1], + ) + def test_params_subqueries_in_joins_one(self): """test #7055""" From ad4a645cac2062ef9ca3a88790cf487f07565f3e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 22 Dec 2021 16:21:33 -0500 Subject: [PATCH 066/632] use fully qualified, locatable names for all use of api.named_type() Fixed mypy regression where the release of mypy 0.930 added additional internal checks to the format of "named types", requiring that they be fully qualified and locatable. 
This broke the mypy plugin for SQLAlchemy, raising an assertion error, as there was use of symbols such as ``__builtins__`` and other un-locatable or unqualified names that previously had not raised any assertions. Fixes: #7496 Change-Id: I037680606a1d51158ef6503508ec76c5d5adc946 (cherry picked from commit aded8b11d9eccbd1f2b645a94338e34a3d234bc9) --- doc/build/changelog/unreleased_14/7496.rst | 11 +++++++++++ lib/sqlalchemy/ext/mypy/apply.py | 9 +++++---- lib/sqlalchemy/ext/mypy/decl_class.py | 2 +- lib/sqlalchemy/ext/mypy/infer.py | 10 ++++++---- lib/sqlalchemy/ext/mypy/names.py | 6 ++++++ lib/sqlalchemy/ext/mypy/plugin.py | 4 ++-- 6 files changed, 31 insertions(+), 11 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7496.rst diff --git a/doc/build/changelog/unreleased_14/7496.rst b/doc/build/changelog/unreleased_14/7496.rst new file mode 100644 index 00000000000..cc5875fcc1c --- /dev/null +++ b/doc/build/changelog/unreleased_14/7496.rst @@ -0,0 +1,11 @@ +.. change:: + :tags: bug, mypy + :tickets: 7496 + + Fixed mypy regression where the release of mypy 0.930 added additional + internal checks to the format of "named types", requiring that they be + fully qualified and locatable. This broke the mypy plugin for SQLAlchemy, + raising an assertion error, as there was use of symbols such as + ``__builtins__`` and other un-locatable or unqualified names that + previously had not raised any assertions. + diff --git a/lib/sqlalchemy/ext/mypy/apply.py b/lib/sqlalchemy/ext/mypy/apply.py index cf5b4fda257..b3af0560c60 100644 --- a/lib/sqlalchemy/ext/mypy/apply.py +++ b/lib/sqlalchemy/ext/mypy/apply.py @@ -36,6 +36,7 @@ from . import infer from . 
import util +from .names import NAMED_TYPE_SQLA_MAPPED def apply_mypy_mapped_attr( @@ -134,7 +135,7 @@ def re_apply_declarative_assignments( and isinstance(stmt.rvalue.callee.expr, NameExpr) and stmt.rvalue.callee.expr.node is not None and stmt.rvalue.callee.expr.node.fullname - == "sqlalchemy.orm.attributes.Mapped" + == NAMED_TYPE_SQLA_MAPPED and stmt.rvalue.callee.name == "_empty_constructor" and isinstance(stmt.rvalue.args[0], CallExpr) and isinstance(stmt.rvalue.args[0].callee, RefExpr) @@ -165,7 +166,7 @@ def re_apply_declarative_assignments( if python_type_for_type is not None: left_node.type = api.named_type( - "__sa_Mapped", [python_type_for_type] + NAMED_TYPE_SQLA_MAPPED, [python_type_for_type] ) if update_cls_metadata: @@ -202,12 +203,12 @@ class User(Base): if left_hand_explicit_type is not None: left_node.type = api.named_type( - "__sa_Mapped", [left_hand_explicit_type] + NAMED_TYPE_SQLA_MAPPED, [left_hand_explicit_type] ) else: lvalue.is_inferred_def = False left_node.type = api.named_type( - "__sa_Mapped", + NAMED_TYPE_SQLA_MAPPED, [] if python_type_for_type is None else [python_type_for_type], ) diff --git a/lib/sqlalchemy/ext/mypy/decl_class.py b/lib/sqlalchemy/ext/mypy/decl_class.py index 0d7462d5bde..c33c30e2574 100644 --- a/lib/sqlalchemy/ext/mypy/decl_class.py +++ b/lib/sqlalchemy/ext/mypy/decl_class.py @@ -327,7 +327,7 @@ class MyClass: ) left_node.node.type = api.named_type( - "__sa_Mapped", [left_hand_explicit_type] + names.NAMED_TYPE_SQLA_MAPPED, [left_hand_explicit_type] ) # this will ignore the rvalue entirely diff --git a/lib/sqlalchemy/ext/mypy/infer.py b/lib/sqlalchemy/ext/mypy/infer.py index 6d243b6ec1d..3cd946e04d0 100644 --- a/lib/sqlalchemy/ext/mypy/infer.py +++ b/lib/sqlalchemy/ext/mypy/infer.py @@ -147,7 +147,7 @@ class MyClass: type_is_a_collection = True if python_type_for_type is not None: python_type_for_type = api.named_type( - "__builtins__.list", [python_type_for_type] + names.NAMED_TYPE_BUILTINS_LIST, 
[python_type_for_type] ) elif ( uselist_arg is None or api.parse_bool(uselist_arg) is True @@ -438,7 +438,7 @@ def _infer_type_from_left_and_inferred_right( if not is_subtype(left_hand_explicit_type, python_type_for_type): effective_type = api.named_type( - "__sa_Mapped", [orig_python_type_for_type] + names.NAMED_TYPE_SQLA_MAPPED, [orig_python_type_for_type] ) msg = ( @@ -507,7 +507,9 @@ def infer_type_from_left_hand_type_only( ) util.fail(api, msg.format(node.name), node) - return api.named_type("__sa_Mapped", [AnyType(TypeOfAny.special_form)]) + return api.named_type( + names.NAMED_TYPE_SQLA_MAPPED, [AnyType(TypeOfAny.special_form)] + ) else: # use type from the left hand side @@ -529,7 +531,7 @@ def extract_python_type_from_typeengine( return Instance(first_arg.node, []) # TODO: support other pep-435 types here else: - return api.named_type("__builtins__.str", []) + return api.named_type(names.NAMED_TYPE_BUILTINS_STR, []) assert node.has_base("sqlalchemy.sql.type_api.TypeEngine"), ( "could not extract Python type from node: %s" % node diff --git a/lib/sqlalchemy/ext/mypy/names.py b/lib/sqlalchemy/ext/mypy/names.py index 3dbfcc77032..8ec15a6d43a 100644 --- a/lib/sqlalchemy/ext/mypy/names.py +++ b/lib/sqlalchemy/ext/mypy/names.py @@ -47,6 +47,12 @@ DECLARATIVE_MIXIN: int = util.symbol("DECLARATIVE_MIXIN") # type: ignore QUERY_EXPRESSION: int = util.symbol("QUERY_EXPRESSION") # type: ignore +# names that must succeed with mypy.api.named_type +NAMED_TYPE_BUILTINS_OBJECT = "builtins.object" +NAMED_TYPE_BUILTINS_STR = "builtins.str" +NAMED_TYPE_BUILTINS_LIST = "builtins.list" +NAMED_TYPE_SQLA_MAPPED = "sqlalchemy.orm.attributes.Mapped" + _lookup: Dict[str, Tuple[int, Set[str]]] = { "Column": ( COLUMN, diff --git a/lib/sqlalchemy/ext/mypy/plugin.py b/lib/sqlalchemy/ext/mypy/plugin.py index 356b0d9489e..8687012a1e4 100644 --- a/lib/sqlalchemy/ext/mypy/plugin.py +++ b/lib/sqlalchemy/ext/mypy/plugin.py @@ -142,7 +142,7 @@ def _dynamic_class_hook(ctx: 
DynamicClassDefContext) -> None: ) info.bases = [Instance(cls_arg.node, [])] else: - obj = ctx.api.named_type("__builtins__.object") + obj = ctx.api.named_type(names.NAMED_TYPE_BUILTINS_OBJECT) info.bases = [obj] @@ -152,7 +152,7 @@ def _dynamic_class_hook(ctx: DynamicClassDefContext) -> None: util.fail( ctx.api, "Not able to calculate MRO for declarative base", ctx.call ) - obj = ctx.api.named_type("__builtins__.object") + obj = ctx.api.named_type(names.NAMED_TYPE_BUILTINS_OBJECT) info.bases = [obj] info.fallback_to_any = True From e93459f578133e278bb0fe3e202bccb077cc2c59 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 22 Dec 2021 15:33:11 -0500 Subject: [PATCH 067/632] add recursion check for with_loader_criteria() option Fixed recursion overflow which could occur within ORM statement compilation when using either the :func:`_orm.with_loader_criteria` feature or the the :meth:`_orm.PropComparator.and_` method within a loader strategy in conjunction with a subquery which referred to the same entity being altered by the criteria option, or loaded by the loader strategy. A check for coming across the same loader criteria option in a recursive fashion has been added to accommodate for this scenario. Fixes: #7491 Change-Id: I8701332717c45a21948ea4788a3058c0fbbf03a7 (cherry picked from commit c66c6d1aeff92f838740b7745a9c2a47852949d6) --- doc/build/changelog/unreleased_14/7491.rst | 12 +++ lib/sqlalchemy/orm/context.py | 3 +- lib/sqlalchemy/orm/util.py | 17 ++- lib/sqlalchemy/sql/annotation.py | 12 ++- test/orm/test_relationship_criteria.py | 118 +++++++++++++++++++++ 5 files changed, 156 insertions(+), 6 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7491.rst diff --git a/doc/build/changelog/unreleased_14/7491.rst b/doc/build/changelog/unreleased_14/7491.rst new file mode 100644 index 00000000000..f1a19525bb1 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7491.rst @@ -0,0 +1,12 @@ +.. 
change:: + :tags: bug, orm + :tickets: 7491 + + Fixed recursion overflow which could occur within ORM statement compilation + when using either the :func:`_orm.with_loader_criteria` feature or the the + :meth:`_orm.PropComparator.and_` method within a loader strategy in + conjunction with a subquery which referred to the same entity being altered + by the criteria option, or loaded by the loader strategy. A check for + coming across the same loader criteria option in a recursive fashion has + been added to accommodate for this scenario. + diff --git a/lib/sqlalchemy/orm/context.py b/lib/sqlalchemy/orm/context.py index 14130e40c4e..b828bcb4601 100644 --- a/lib/sqlalchemy/orm/context.py +++ b/lib/sqlalchemy/orm/context.py @@ -2143,7 +2143,8 @@ def _get_extra_criteria(self, ext_info): for ae in self.global_attributes[ ("additional_entity_criteria", ext_info.mapper) ] - if ae.include_aliases or ae.entity is ext_info + if (ae.include_aliases or ae.entity is ext_info) + and ae._should_include(self) ) else: return () diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index 0e844906809..fba483f8904 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -1149,11 +1149,24 @@ def _all_mappers(self): else: stack.extend(subclass.__subclasses__()) + def _should_include(self, compile_state): + if ( + compile_state.select_statement._annotations.get( + "for_loader_criteria", None + ) + is self + ): + return False + return True + def _resolve_where_criteria(self, ext_info): if self.deferred_where_criteria: - return self.where_criteria._resolve_with_args(ext_info.entity) + crit = self.where_criteria._resolve_with_args(ext_info.entity) else: - return self.where_criteria + crit = self.where_criteria + return sql_util._deep_annotate( + crit, {"for_loader_criteria": self}, detect_subquery_cols=True + ) def process_compile_state_replaced_entities( self, compile_state, mapper_entities diff --git a/lib/sqlalchemy/sql/annotation.py 
b/lib/sqlalchemy/sql/annotation.py index e6618937a4f..3c02ccb26fa 100644 --- a/lib/sqlalchemy/sql/annotation.py +++ b/lib/sqlalchemy/sql/annotation.py @@ -238,7 +238,9 @@ def entity_namespace(self): annotated_classes = {} -def _deep_annotate(element, annotations, exclude=None): +def _deep_annotate( + element, annotations, exclude=None, detect_subquery_cols=False +): """Deep copy the given ClauseElement, annotating each element with the given annotations dictionary. @@ -252,6 +254,7 @@ def _deep_annotate(element, annotations, exclude=None): cloned_ids = {} def clone(elem, **kw): + kw["detect_subquery_cols"] = detect_subquery_cols id_ = id(elem) if id_ in cloned_ids: @@ -262,9 +265,12 @@ def clone(elem, **kw): and hasattr(elem, "proxy_set") and elem.proxy_set.intersection(exclude) ): - newelem = elem._clone(**kw) + newelem = elem._clone(clone=clone, **kw) elif annotations != elem._annotations: - newelem = elem._annotate(annotations) + if detect_subquery_cols and elem._is_immutable: + newelem = elem._clone(clone=clone, **kw)._annotate(annotations) + else: + newelem = elem._annotate(annotations) else: newelem = elem newelem._copy_internals(clone=clone) diff --git a/test/orm/test_relationship_criteria.py b/test/orm/test_relationship_criteria.py index 00e84dc8d87..932f80d9f59 100644 --- a/test/orm/test_relationship_criteria.py +++ b/test/orm/test_relationship_criteria.py @@ -4,6 +4,7 @@ from sqlalchemy import Column from sqlalchemy import DateTime from sqlalchemy import event +from sqlalchemy import exc as sa_exc from sqlalchemy import ForeignKey from sqlalchemy import func from sqlalchemy import Integer @@ -25,6 +26,7 @@ from sqlalchemy.orm import with_loader_criteria from sqlalchemy.orm.decl_api import declared_attr from sqlalchemy.testing import eq_ +from sqlalchemy.testing.assertions import expect_raises from sqlalchemy.testing.assertsql import CompiledSQL from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.util import resolve_lambda @@ 
-250,6 +252,50 @@ def test_select_from_mapper_mapper_criteria(self, user_address_fixture): "WHERE users.name != :name_1", ) + def test_with_loader_criteria_recursion_check_scalar_subq( + self, user_address_fixture + ): + """test #7491""" + + User, Address = user_address_fixture + subq = select(Address).where(Address.id == 8).scalar_subquery() + stmt = ( + select(User) + .join(Address) + .options(with_loader_criteria(Address, Address.id == subq)) + ) + self.assert_compile( + stmt, + "SELECT users.id, users.name FROM users JOIN addresses " + "ON users.id = addresses.user_id AND addresses.id = " + "(SELECT addresses.id, addresses.user_id, " + "addresses.email_address FROM addresses " + "WHERE addresses.id = :id_1)", + ) + + def test_with_loader_criteria_recursion_check_from_subq( + self, user_address_fixture + ): + """test #7491""" + + User, Address = user_address_fixture + subq = select(Address).where(Address.id == 8).subquery() + stmt = ( + select(User) + .join(Address) + .options(with_loader_criteria(Address, Address.id == subq.c.id)) + ) + # note this query is incorrect SQL right now. This is a current + # artifact of how with_loader_criteria() is used and may be considered + # a bug at some point, in which case if fixed this query can be + # changed. the main thing we are testing at the moment is that + # there is not a recursion overflow. + self.assert_compile( + stmt, + "SELECT users.id, users.name FROM users JOIN addresses " + "ON users.id = addresses.user_id AND addresses.id = anon_1.id", + ) + def test_select_mapper_columns_mapper_criteria(self, user_address_fixture): User, Address = user_address_fixture @@ -1300,6 +1346,78 @@ def go(value): ), ) + @testing.combinations( + (joinedload, False), + (lazyload, True), + (subqueryload, False), + (selectinload, True), + argnames="opt,results_supported", + ) + def test_loader_criteria_subquery_w_same_entity( + self, user_address_fixture, opt, results_supported + ): + """test #7491. 
+ + note this test also uses the not-quite-supported form of subquery + criteria introduced by #7489. where we also have to clone + the subquery linked only from a column criteria. this required + additional changes to the _annotate() method that is also + test here, which is why two of the loader strategies still fail; + we're just testing that there's no recursion overflow with this + very particular form. + + """ + User, Address = user_address_fixture + + s = Session(testing.db, future=True) + + def go(value): + subq = ( + select(Address.id) + .where(Address.email_address != value) + .subquery() + ) + stmt = ( + select(User) + .options( + # subquery here would need to be added to the FROM + # clause. this isn't quite supported and won't work + # right now with joinedoad() or subqueryload(). + opt(User.addresses.and_(Address.id == subq.c.id)), + ) + .order_by(User.id) + ) + result = s.execute(stmt) + return result + + for value in ( + "ed@wood.com", + "ed@lala.com", + "ed@wood.com", + "ed@lala.com", + ): + s.close() + + if not results_supported: + # for joinedload and subqueryload, the query generated here + # is invalid right now; this is because it's already not + # quite a supported pattern to refer to a subquery-bound + # column in loader criteria. However, the main thing we want + # to prevent here is the recursion overflow, so make sure + # we get a DBAPI error at least indicating compilation + # succeeded. 
+ with expect_raises(sa_exc.DBAPIError): + go(value).scalars().unique().all() + else: + result = go(value).scalars().unique().all() + + eq_( + result, + self._user_minus_edwood(*user_address_fixture) + if value == "ed@wood.com" + else self._user_minus_edlala(*user_address_fixture), + ) + @testing.combinations((True,), (False,), argnames="use_compiled_cache") def test_selectinload_nested_criteria( self, user_order_item_fixture, use_compiled_cache From 42c2330b2bb2003abc965cd0400e20706591bc08 Mon Sep 17 00:00:00 2001 From: Kai Mueller <15907922+kasium@users.noreply.github.com> Date: Tue, 21 Dec 2021 15:00:30 -0500 Subject: [PATCH 068/632] Fix missing class attributes when using __class_getitem__ Fixed issue where the ``__class_getitem__()`` method of the generated declarative base class by :func:`_orm.as_declarative` would lead to inaccessible class attributes such as ``__table__``, for cases where a ``Generic[T]`` style typing declaration were used in the class hierarchy. This is in continuation from the basic addition of ``__class_getitem__()`` in :ticket:`7368`. Pull request courtesy Kai Mueller. Fixes: #7462 Closes: #7470 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/7470 Pull-request-sha: d5e5765e0e5542149f116ed9ccff1b3e2e32dee5 Change-Id: I6418af6d34532ff181343884bd419d9c2684e617 (cherry picked from commit 3088df9820bd62b3edff15d39a7635e86d265300) --- doc/build/changelog/unreleased_14/7462.rst | 10 ++++++++++ lib/sqlalchemy/orm/decl_api.py | 8 +++++++- test/orm/declarative/test_typing_py3k.py | 19 ++++++++++++++++++- 3 files changed, 35 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7462.rst diff --git a/doc/build/changelog/unreleased_14/7462.rst b/doc/build/changelog/unreleased_14/7462.rst new file mode 100644 index 00000000000..fa71e1448f3 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7462.rst @@ -0,0 +1,10 @@ +.. 
change:: + :tags: bug, orm, mypy + :tickets: 7462, 7368 + + Fixed issue where the ``__class_getitem__()`` method of the generated + declarative base class by :func:`_orm.as_declarative` would lead to + inaccessible class attributes such as ``__table__``, for cases where a + ``Generic[T]`` style typing declaration were used in the class hierarchy. + This is in continuation from the basic addition of ``__class_getitem__()`` + in :ticket:`7368`. Pull request courtesy Kai Mueller. diff --git a/lib/sqlalchemy/orm/decl_api.py b/lib/sqlalchemy/orm/decl_api.py index b5bfb0380fc..e90b91a5a49 100644 --- a/lib/sqlalchemy/orm/decl_api.py +++ b/lib/sqlalchemy/orm/decl_api.py @@ -809,8 +809,14 @@ class Base(metaclass=DeclarativeMeta): class_dict["__abstract__"] = True if mapper: class_dict["__mapper_cls__"] = mapper + if hasattr(cls, "__class_getitem__"): - class_dict["__class_getitem__"] = cls.__class_getitem__ + + def __class_getitem__(cls, key): + # allow generic classes in py3.9+ + return cls + + class_dict["__class_getitem__"] = __class_getitem__ return metaclass(name, bases, class_dict) diff --git a/test/orm/declarative/test_typing_py3k.py b/test/orm/declarative/test_typing_py3k.py index 7cd70616b75..595194512d9 100644 --- a/test/orm/declarative/test_typing_py3k.py +++ b/test/orm/declarative/test_typing_py3k.py @@ -3,9 +3,13 @@ from typing import TypeVar from sqlalchemy import Column +from sqlalchemy import inspect from sqlalchemy import Integer from sqlalchemy.orm import as_declarative +from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures +from sqlalchemy.testing import is_ +from sqlalchemy.testing.assertions import expect_raises class DeclarativeBaseTest(fixtures.TestBase): @@ -19,9 +23,22 @@ class CommonBase(Generic[T]): def boring(cls: Type[T]) -> Type[T]: return cls + @classmethod + def more_boring(cls: Type[T]) -> int: + return 27 + @as_declarative() class Base(CommonBase[T]): - pass + foo = 1 class Tab(Base["Tab"]): + __tablename__ = "foo" a = 
Column(Integer, primary_key=True) + + eq_(Tab.foo, 1) + is_(Tab.__table__, inspect(Tab).local_table) + eq_(Tab.boring(), Tab) + eq_(Tab.more_boring(), 27) + + with expect_raises(AttributeError): + Tab.non_existent From c3d971576ddd6c2b166ce2feb353b0050eb83d2e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 22 Dec 2021 21:07:33 -0500 Subject: [PATCH 069/632] add mariadb conf section Change-Id: I7686ab7a877895de33e9ca19217ae38cb5729238 (cherry picked from commit f37931d50c21c412418fbd19d4676042fb5292a9) --- doc/build/conf.py | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/build/conf.py b/doc/build/conf.py index 28c04b449ec..901a1d3eadd 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -67,6 +67,7 @@ "asyncio", "postgresql", "mysql", + "mariadb", "sqlite", "mssql", "oracle", From ce51ca62587067385280b92a255b9a41a7b2380e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 22 Dec 2021 21:08:12 -0500 Subject: [PATCH 070/632] - 1.4.29 --- doc/build/changelog/changelog_14.rst | 117 ++++++++++++++++++++- doc/build/changelog/unreleased_14/7301.rst | 7 -- doc/build/changelog/unreleased_14/7410.rst | 6 -- doc/build/changelog/unreleased_14/7432.rst | 8 -- doc/build/changelog/unreleased_14/7446.rst | 7 -- doc/build/changelog/unreleased_14/7447.rst | 10 -- doc/build/changelog/unreleased_14/7450.rst | 9 -- doc/build/changelog/unreleased_14/7457.rst | 11 -- doc/build/changelog/unreleased_14/7462.rst | 10 -- doc/build/changelog/unreleased_14/7489.rst | 13 --- doc/build/changelog/unreleased_14/7491.rst | 12 --- doc/build/changelog/unreleased_14/7496.rst | 11 -- doc/build/conf.py | 4 +- 13 files changed, 118 insertions(+), 107 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/7301.rst delete mode 100644 doc/build/changelog/unreleased_14/7410.rst delete mode 100644 doc/build/changelog/unreleased_14/7432.rst delete mode 100644 doc/build/changelog/unreleased_14/7446.rst delete mode 100644 doc/build/changelog/unreleased_14/7447.rst delete mode 100644 
doc/build/changelog/unreleased_14/7450.rst delete mode 100644 doc/build/changelog/unreleased_14/7457.rst delete mode 100644 doc/build/changelog/unreleased_14/7462.rst delete mode 100644 doc/build/changelog/unreleased_14/7489.rst delete mode 100644 doc/build/changelog/unreleased_14/7491.rst delete mode 100644 doc/build/changelog/unreleased_14/7496.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 006e0c585fc..3b37e7049fc 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,122 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.29 - :include_notes_from: unreleased_14 + :released: December 22, 2021 + + .. change:: + :tags: usecase, asyncio + :tickets: 7301 + + Added :func:`_asyncio.async_engine_config` function to create + an async engine from a configuration dict. This otherwise + behaves the same as :func:`_sa.engine_from_config`. + + .. change:: + :tags: bug, orm + :tickets: 7489 + + Fixed issue in new "loader criteria" method + :meth:`_orm.PropComparator.and_` where usage with a loader strategy like + :func:`_orm.selectinload` against a column that was a member of the ``.c.`` + collection of a subquery object, where the subquery would be dynamically + added to the FROM clause of the statement, would be subject to stale + parameter values within the subquery in the SQL statement cache, as the + process used by the loader strategy to replace the parameters at execution + time would fail to accommodate the subquery when received in this form. + + + .. 
change:: + :tags: bug, orm + :tickets: 7491 + + Fixed recursion overflow which could occur within ORM statement compilation + when using either the :func:`_orm.with_loader_criteria` feature or the the + :meth:`_orm.PropComparator.and_` method within a loader strategy in + conjunction with a subquery which referred to the same entity being altered + by the criteria option, or loaded by the loader strategy. A check for + coming across the same loader criteria option in a recursive fashion has + been added to accommodate for this scenario. + + + .. change:: + :tags: bug, orm, mypy + :tickets: 7462, 7368 + + Fixed issue where the ``__class_getitem__()`` method of the generated + declarative base class by :func:`_orm.as_declarative` would lead to + inaccessible class attributes such as ``__table__``, for cases where a + ``Generic[T]`` style typing declaration were used in the class hierarchy. + This is in continuation from the basic addition of ``__class_getitem__()`` + in :ticket:`7368`. Pull request courtesy Kai Mueller. + + .. change:: + :tags: bug, mypy + :tickets: 7496 + + Fixed mypy regression where the release of mypy 0.930 added additional + internal checks to the format of "named types", requiring that they be + fully qualified and locatable. This broke the mypy plugin for SQLAlchemy, + raising an assertion error, as there was use of symbols such as + ``__builtins__`` and other un-locatable or unqualified names that + previously had not raised any assertions. + + + .. change:: + :tags: bug, engine + :tickets: 7432 + + Corrected the error message for the ``AttributeError`` that's raised when + attempting to write to an attribute on the :class:`_result.Row` class, + which is immutable. The previous message claimed the column didn't exist + which is misleading. + + .. 
change:: + :tags: bug, mariadb + :tickets: 7457 + + Corrected the error classes inspected for the "is_disconnect" check for the + ``mariadbconnector`` dialect, which was failing for disconnects that + occurred due to common MySQL/MariaDB error codes such as 2006; the DBAPI + appears to currently use the ``mariadb.InterfaceError`` exception class for + disconnect errors such as error code 2006, which has been added to the list + of classes checked. + + + .. change:: + :tags: bug, orm, regression + :tickets: 7447 + + Fixed caching-related issue where the use of a loader option of the form + ``lazyload(aliased(A).bs).joinedload(B.cs)`` would fail to result in the + joinedload being invoked for runs subsequent to the query being cached, due + to a mismatch for the options / object path applied to the objects loaded + for a query with a lead entity that used ``aliased()``. + + + .. change:: + :tags: bug, tests, regression + :tickets: 7450 + + Fixed a regression in the test suite where the test called + ``CompareAndCopyTest::test_all_present`` would fail on some platforms due + to additional testing artifacts being detected. Pull request courtesy Nils + Philippsen. + + + .. change:: + :tags: usecase, orm + :tickets: 7410 + + Added :paramref:`_orm.Session.get.execution_options` parameter which was + previously missing from the :meth:`_orm.Session.get` method. + + .. change:: + :tags: bug, engine, regression + :tickets: 7446 + + Fixed regression in the :func:`_engine.make_url` function used to parse URL + strings where the query string parsing would go into a recursion overflow + if a Python 2 ``u''`` string were used. .. changelog:: :version: 1.4.28 diff --git a/doc/build/changelog/unreleased_14/7301.rst b/doc/build/changelog/unreleased_14/7301.rst deleted file mode 100644 index 160e06c8383..00000000000 --- a/doc/build/changelog/unreleased_14/7301.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. 
change:: - :tags: usecase, asyncio - :tickets: 7301 - - Added :func:`_asyncio.async_engine_config` function to create - an async engine from a configuration dict. This otherwise - behaves the same as :func:`_sa.engine_from_config`. diff --git a/doc/build/changelog/unreleased_14/7410.rst b/doc/build/changelog/unreleased_14/7410.rst deleted file mode 100644 index 7b4e8efbf11..00000000000 --- a/doc/build/changelog/unreleased_14/7410.rst +++ /dev/null @@ -1,6 +0,0 @@ -.. change:: - :tags: usecase, orm - :tickets: 7410 - - Added :paramref:`_orm.Session.get.execution_options` parameter which was - previously missing from the :meth:`_orm.Session.get` method. diff --git a/doc/build/changelog/unreleased_14/7432.rst b/doc/build/changelog/unreleased_14/7432.rst deleted file mode 100644 index 6e3f74c6710..00000000000 --- a/doc/build/changelog/unreleased_14/7432.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, engine - :tickets: 7432 - - Corrected the error message for the ``AttributeError`` that's raised when - attempting to write to an attribute on the :class:`_result.Row` class, - which is immutable. The previous message claimed the column didn't exist - which is misleading. diff --git a/doc/build/changelog/unreleased_14/7446.rst b/doc/build/changelog/unreleased_14/7446.rst deleted file mode 100644 index d92eb13a8db..00000000000 --- a/doc/build/changelog/unreleased_14/7446.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: bug, engine, regression - :tickets: 7446 - - Fixed regression in the :func:`_engine.make_url` function used to parse URL - strings where the query string parsing would go into a recursion overflow - if a Python 2 ``u''`` string were used. diff --git a/doc/build/changelog/unreleased_14/7447.rst b/doc/build/changelog/unreleased_14/7447.rst deleted file mode 100644 index 3f954faba4e..00000000000 --- a/doc/build/changelog/unreleased_14/7447.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. 
change:: - :tags: bug, orm, regression - :tickets: 7447 - - Fixed caching-related issue where the use of a loader option of the form - ``lazyload(aliased(A).bs).joinedload(B.cs)`` would fail to result in the - joinedload being invoked for runs subsequent to the query being cached, due - to a mismatch for the options / object path applied to the objects loaded - for a query with a lead entity that used ``aliased()``. - diff --git a/doc/build/changelog/unreleased_14/7450.rst b/doc/build/changelog/unreleased_14/7450.rst deleted file mode 100644 index 56aaa1d4f78..00000000000 --- a/doc/build/changelog/unreleased_14/7450.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, tests, regression - :tickets: 7450 - - Fixed a regression in the test suite where the test called - ``CompareAndCopyTest::test_all_present`` would fail on some platforms due - to additional testing artifacts being detected. Pull request courtesy Nils - Philippsen. - diff --git a/doc/build/changelog/unreleased_14/7457.rst b/doc/build/changelog/unreleased_14/7457.rst deleted file mode 100644 index b1942b0eae2..00000000000 --- a/doc/build/changelog/unreleased_14/7457.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. change:: - :tags: bug, mariadb - :tickets: 7457 - - Corrected the error classes inspected for the "is_disconnect" check for the - ``mariadbconnector`` dialect, which was failing for disconnects that - occurred due to common MySQL/MariaDB error codes such as 2006; the DBAPI - appears to currently use the ``mariadb.InterfaceError`` exception class for - disconnect errors such as error code 2006, which has been added to the list - of classes checked. - diff --git a/doc/build/changelog/unreleased_14/7462.rst b/doc/build/changelog/unreleased_14/7462.rst deleted file mode 100644 index fa71e1448f3..00000000000 --- a/doc/build/changelog/unreleased_14/7462.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. 
change:: - :tags: bug, orm, mypy - :tickets: 7462, 7368 - - Fixed issue where the ``__class_getitem__()`` method of the generated - declarative base class by :func:`_orm.as_declarative` would lead to - inaccessible class attributes such as ``__table__``, for cases where a - ``Generic[T]`` style typing declaration were used in the class hierarchy. - This is in continuation from the basic addition of ``__class_getitem__()`` - in :ticket:`7368`. Pull request courtesy Kai Mueller. diff --git a/doc/build/changelog/unreleased_14/7489.rst b/doc/build/changelog/unreleased_14/7489.rst deleted file mode 100644 index 4af33f42985..00000000000 --- a/doc/build/changelog/unreleased_14/7489.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 7489 - - Fixed issue in new "loader criteria" method - :meth:`_orm.PropComparator.and_` where usage with a loader strategy like - :func:`_orm.selectinload` against a column that was a member of the ``.c.`` - collection of a subquery object, where the subquery would be dynamically - added to the FROM clause of the statement, would be subject to stale - parameter values within the subquery in the SQL statement cache, as the - process used by the loader strategy to replace the parameters at execution - time would fail to accommodate the subquery when received in this form. - diff --git a/doc/build/changelog/unreleased_14/7491.rst b/doc/build/changelog/unreleased_14/7491.rst deleted file mode 100644 index f1a19525bb1..00000000000 --- a/doc/build/changelog/unreleased_14/7491.rst +++ /dev/null @@ -1,12 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 7491 - - Fixed recursion overflow which could occur within ORM statement compilation - when using either the :func:`_orm.with_loader_criteria` feature or the the - :meth:`_orm.PropComparator.and_` method within a loader strategy in - conjunction with a subquery which referred to the same entity being altered - by the criteria option, or loaded by the loader strategy. 
A check for - coming across the same loader criteria option in a recursive fashion has - been added to accommodate for this scenario. - diff --git a/doc/build/changelog/unreleased_14/7496.rst b/doc/build/changelog/unreleased_14/7496.rst deleted file mode 100644 index cc5875fcc1c..00000000000 --- a/doc/build/changelog/unreleased_14/7496.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. change:: - :tags: bug, mypy - :tickets: 7496 - - Fixed mypy regression where the release of mypy 0.930 added additional - internal checks to the format of "named types", requiring that they be - fully qualified and locatable. This broke the mypy plugin for SQLAlchemy, - raising an assertion error, as there was use of symbols such as - ``__builtins__`` and other un-locatable or unqualified names that - previously had not raised any assertions. - diff --git a/doc/build/conf.py b/doc/build/conf.py index 901a1d3eadd..74b5a8014f2 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -205,9 +205,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. -release = "1.4.28" +release = "1.4.29" -release_date = "December 9, 2021" +release_date = "December 22, 2021" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From 6cdb3331c6a3331faf74f134ec0d04c7f217c454 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 22 Dec 2021 21:19:30 -0500 Subject: [PATCH 071/632] Version 1.4.30 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 3b37e7049fc..11d57f87b3a 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. 
changelog:: + :version: 1.4.30 + :include_notes_from: unreleased_14 + .. changelog:: :version: 1.4.29 :released: December 22, 2021 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index d249fbbcc14..f4241074e51 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.29" +__version__ = "1.4.30" def __go(lcls): From 5d12593ae142ea8f377916f785b59c33d65046a4 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 26 Dec 2021 11:25:00 -0500 Subject: [PATCH 072/632] restore graceful degrade of subqueryload w from_statement Fixed regression from 1.3 where the "subqueryload" loader strategy would fail with a stack trace if used against a query that made use of :meth:`_orm.Query.from_statement` or :meth:`_sql.Select.from_statement`. As subqueryload requires modifying the original statement, it's not compatible with the "from_statement" use case, especially for statements made against the :func:`_sql.text` construct. The behavior now is equivalent to that of 1.3 and previously, which is that the loader strategy silently degrades to not be used for such statements, typically falling back to using the lazyload strategy. 
Fixes: #7505 Change-Id: I950800dc86a77f8320a5e696edce1ff2c84b1eb9 (cherry picked from commit 818d62be00530549aa52dd5d981819010e4ae484) --- doc/build/changelog/unreleased_14/7505.rst | 14 ++++ lib/sqlalchemy/orm/context.py | 6 ++ lib/sqlalchemy/orm/strategies.py | 6 ++ test/orm/test_eager_relations.py | 93 ++++++++++++++++++++++ test/orm/test_query.py | 16 ++-- test/orm/test_selectin_relations.py | 42 ++++++++++ test/orm/test_subquery_relations.py | 66 +++++++++++++++ 7 files changed, 237 insertions(+), 6 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7505.rst diff --git a/doc/build/changelog/unreleased_14/7505.rst b/doc/build/changelog/unreleased_14/7505.rst new file mode 100644 index 00000000000..b017c0ae138 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7505.rst @@ -0,0 +1,14 @@ +.. change:: + :tags: bug, orm, regression + :tickets: 7505 + + Fixed regression from 1.3 where the "subqueryload" loader strategy would + fail with a stack trace if used against a query that made use of + :meth:`_orm.Query.from_statement` or :meth:`_sql.Select.from_statement`. As + subqueryload requires modifying the original statement, it's not compatible + with the "from_statement" use case, especially for statements made against + the :func:`_sql.text` construct. The behavior now is equivalent to that of + 1.3 and previously, which is that the loader strategy silently degrades to + not be used for such statements, typically falling back to using the + lazyload strategy. + diff --git a/lib/sqlalchemy/orm/context.py b/lib/sqlalchemy/orm/context.py index b828bcb4601..130fe67af62 100644 --- a/lib/sqlalchemy/orm/context.py +++ b/lib/sqlalchemy/orm/context.py @@ -382,6 +382,12 @@ def _mapper_loads_polymorphically_with(self, mapper, adapter): for m in m2.iterate_to_root(): # TODO: redundant ? 
self._polymorphic_adapters[m.local_table] = adapter + @classmethod + def _create_entities_collection(cls, query, legacy): + raise NotImplementedError( + "this method only works for ORMSelectCompileState" + ) + @sql.base.CompileState.plugin_for("orm", "orm_from_statement") class ORMFromStatementCompileState(ORMCompileState): diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index 71c4a697611..679b35a21ed 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -27,6 +27,7 @@ from .base import _SET_DEFERRED_EXPIRED from .context import _column_descriptions from .context import ORMCompileState +from .context import ORMSelectCompileState from .context import QueryContext from .interfaces import LoaderStrategy from .interfaces import StrategizedProperty @@ -1801,6 +1802,11 @@ def create_row_processor( # the other post loaders, however we have this here for consistency elif self._check_recursive_postload(context, path, self.join_depth): return + elif not isinstance(context.compile_state, ORMSelectCompileState): + # issue 7505 - subqueryload() in 1.3 and previous would silently + # degrade for from_statement() without warning. 
this behavior + # is restored here + return if not self.parent.class_manager[self.key].impl.supports_population: raise sa_exc.InvalidRequestError( diff --git a/test/orm/test_eager_relations.py b/test/orm/test_eager_relations.py index 2ab2bba5c5a..d9da36073c0 100644 --- a/test/orm/test_eager_relations.py +++ b/test/orm/test_eager_relations.py @@ -86,6 +86,99 @@ def test_basic(self): ) eq_(self.static.user_address_result, q.order_by(User.id).all()) + @testing.combinations(True, False) + def test_from_statement(self, legacy): + users, Address, addresses, User = ( + self.tables.users, + self.classes.Address, + self.tables.addresses, + self.classes.User, + ) + + self.mapper_registry.map_imperatively( + User, + users, + properties={ + "addresses": relationship( + self.mapper_registry.map_imperatively(Address, addresses), + order_by=Address.id, + ) + }, + ) + + sess = fixture_session() + + stmt = select(User).where(User.id == 7) + + def go(): + if legacy: + ret = ( + sess.query(User) + .from_statement(stmt) + .options(joinedload(User.addresses)) + .all() + ) + else: + ret = sess.scalars( + select(User) + .from_statement(stmt) + .options(joinedload(User.addresses)) + ).all() + + eq_(self.static.user_address_result[0:1], ret) + + # joinedload can't be applied here so this necessarily + # has to lazy load the addresses + self.assert_sql_count(testing.db, go, 2) + + @testing.combinations(True, False) + def test_from_statement_contains_eager(self, legacy): + users, Address, addresses, User = ( + self.tables.users, + self.classes.Address, + self.tables.addresses, + self.classes.User, + ) + + self.mapper_registry.map_imperatively( + User, + users, + properties={ + "addresses": relationship( + self.mapper_registry.map_imperatively(Address, addresses), + order_by=Address.id, + ) + }, + ) + + sess = fixture_session() + + # for contains_eager, Address.id is enough for it to be picked up + stmt = ( + select(User, Address.id).where(User.id == 7).join(User.addresses) + ) + + def go(): 
+ if legacy: + ret = ( + sess.query(User) + .from_statement(stmt) + .options(contains_eager(User.addresses)) + .all() + ) + else: + ret = sess.scalars( + select(User) + .from_statement(stmt) + .options(contains_eager(User.addresses)) + ).all() + + eq_(self.static.user_address_result[0:1], ret) + + # joinedload can't be applied here so this necessarily + # has to lazy load the addresses + self.assert_sql_count(testing.db, go, 1) + def test_no_render_in_subquery(self): """test #6378""" diff --git a/test/orm/test_query.py b/test/orm/test_query.py index 8bf3dcdb5c5..a1dbb2f617b 100644 --- a/test/orm/test_query.py +++ b/test/orm/test_query.py @@ -5976,12 +5976,11 @@ def test_textual_select_orm_columns(self): ( False, subqueryload, - # sqlite seems happy to interpret the broken SQL and give you the - # correct result somehow, this is a bug in SQLite so don't rely - # upon it doing that - testing.fails("not working yet") + testing.skip_if("sqlite"), ), - (True, subqueryload, testing.fails("not sure about implementation")), + ( + True, + subqueryload, + ), (False, selectinload), (True, selectinload), ) @@ -6009,7 +6008,12 @@ def test_related_eagerload_against_text(self, add_columns, loader_option): def go(): eq_(set(q.all()), set(self.static.user_address_result)) - self.assert_sql_count(testing.db, go, 2) + if loader_option is subqueryload: + # subqueryload necessarily degrades to lazy loads for a text + # statement. 
+ self.assert_sql_count(testing.db, go, 5) + else: + self.assert_sql_count(testing.db, go, 2) def test_whereclause(self): User = self.classes.User diff --git a/test/orm/test_selectin_relations.py b/test/orm/test_selectin_relations.py index 2add1015ffc..7a5bb0e7edb 100644 --- a/test/orm/test_selectin_relations.py +++ b/test/orm/test_selectin_relations.py @@ -89,6 +89,48 @@ def go(): self.assert_sql_count(testing.db, go, 2) + @testing.combinations(True, False) + def test_from_statement(self, legacy): + users, Address, addresses, User = ( + self.tables.users, + self.classes.Address, + self.tables.addresses, + self.classes.User, + ) + + self.mapper_registry.map_imperatively( + User, + users, + properties={ + "addresses": relationship( + self.mapper_registry.map_imperatively(Address, addresses), + order_by=Address.id, + ) + }, + ) + sess = fixture_session() + + stmt = select(User).where(User.id == 7) + + def go(): + if legacy: + ret = ( + sess.query(User) + .from_statement(stmt) + .options(selectinload(User.addresses)) + .all() + ) + else: + ret = sess.scalars( + select(User) + .from_statement(stmt) + .options(selectinload(User.addresses)) + ).all() + + eq_(self.static.user_address_result[0:1], ret) + + self.assert_sql_count(testing.db, go, 2) + def user_dingaling_fixture(self): users, Dingaling, User, dingalings, Address, addresses = ( self.tables.users, diff --git a/test/orm/test_subquery_relations.py b/test/orm/test_subquery_relations.py index 5be0042b0da..bf14d7212a4 100644 --- a/test/orm/test_subquery_relations.py +++ b/test/orm/test_subquery_relations.py @@ -28,6 +28,7 @@ from sqlalchemy.testing import is_not from sqlalchemy.testing import is_true from sqlalchemy.testing.assertsql import CompiledSQL +from sqlalchemy.testing.assertsql import Or from sqlalchemy.testing.entities import ComparableEntity from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.schema import Column @@ -89,6 +90,71 @@ def go(): self.assert_sql_count(testing.db, go, 
2) + @testing.combinations(True, False) + def test_from_statement(self, legacy): + users, Address, addresses, User = ( + self.tables.users, + self.classes.Address, + self.tables.addresses, + self.classes.User, + ) + + self.mapper_registry.map_imperatively( + User, + users, + properties={ + "addresses": relationship( + self.mapper_registry.map_imperatively(Address, addresses), + order_by=Address.id, + ) + }, + ) + sess = fixture_session() + + stmt = select(User).where(User.id == 7) + + with self.sql_execution_asserter(testing.db) as asserter: + if legacy: + ret = ( + sess.query(User) + # .where(User.id == 7) + .from_statement(stmt) + .options(subqueryload(User.addresses)) + .all() + ) + else: + ret = sess.scalars( + select(User) + .from_statement(stmt) + .options(subqueryload(User.addresses)) + ).all() + + eq_(self.static.user_address_result[0:1], ret) + + asserter.assert_( + Or( + CompiledSQL( + "SELECT users.id AS users_id, users.name AS users_name " + "FROM users WHERE users.id = :id_1", + [{"id_1": 7}], + ), + CompiledSQL( + "SELECT users.id, users.name " + "FROM users WHERE users.id = :id_1", + [{"id_1": 7}], + ), + ), + # issue 7505 + # subqueryload degrades for a from_statement. 
this is a lazyload + CompiledSQL( + "SELECT addresses.id AS addresses_id, addresses.user_id AS " + "addresses_user_id, addresses.email_address AS " + "addresses_email_address FROM addresses " + "WHERE :param_1 = addresses.user_id ORDER BY addresses.id", + [{"param_1": 7}], + ), + ) + def test_params_arent_cached(self): users, Address, addresses, User = ( self.tables.users, From 5ed850b58c679f87edb218c24c2403a3e787f637 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 26 Dec 2021 12:13:19 -0500 Subject: [PATCH 073/632] include empty intermediary tables in optimized get Fixed issue in joined-inheritance load of additional attributes functionality in deep multi-level inheritance where an intermediary table that contained no columns would not be included in the tables joined, instead linking those tables to their primary key identifiers. While this works fine, it nonetheless in 1.4 began producing the cartesian product compiler warning. The logic has been changed so that these intermediary tables are included regardless. While this does include additional tables in the query that are not technically necessary, this only occurs for the highly unusual case of deep 3+ level inheritance with intermediary tables that have no non primary key columns, potential performance impact is therefore expected to be negligible. Fixes: #7507 Change-Id: Id2073773e97a0853b744b51feeb2bc4437032e51 (cherry picked from commit c1d2fbac4c399b47f4715f7ea2a1147374d2aa43) --- doc/build/changelog/unreleased_14/7507.rst | 15 +++++ lib/sqlalchemy/orm/mapper.py | 37 +++++++------ test/orm/inheritance/test_basic.py | 64 ++++++++++++++++++++++ 3 files changed, 100 insertions(+), 16 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7507.rst diff --git a/doc/build/changelog/unreleased_14/7507.rst b/doc/build/changelog/unreleased_14/7507.rst new file mode 100644 index 00000000000..7412c7f0ce9 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7507.rst @@ -0,0 +1,15 @@ +.. 
change:: + :tags: bug, orm + :tickets: 7507 + + Fixed issue in joined-inheritance load of additional attributes + functionality in deep multi-level inheritance where an intermediary table + that contained no columns would not be included in the tables joined, + instead linking those tables to their primary key identifiers. While this + works fine, it nonetheless in 1.4 began producing the cartesian product + compiler warning. The logic has been changed so that these intermediary + tables are included regardless. While this does include additional tables + in the query that are not technically necessary, this only occurs for the + highly unusual case of deep 3+ level inheritance with intermediary tables + that have no non primary key columns, potential performance impact is + therefore expected to be negligible. diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index 4de12b88c77..9ac18bea664 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -3024,23 +3024,28 @@ def visit_binary(binary): allconds = [] + start = False + + # as of #7507, from the lowest base table on upwards, + # we include all intermediary tables. + + for mapper in reversed(list(self.iterate_to_root())): + if mapper.local_table in tables: + start = True + elif not isinstance(mapper.local_table, expression.TableClause): + return None + if start and not mapper.single: + allconds.append(mapper.inherit_condition) + tables.add(mapper.local_table) + + # only the bottom table needs its criteria to be altered to fit + # the primary key ident - the rest of the tables upwards to the + # descendant-most class should all be present and joined to each + # other. 
try: - start = False - for mapper in reversed(list(self.iterate_to_root())): - if mapper.local_table in tables: - start = True - elif not isinstance( - mapper.local_table, expression.TableClause - ): - return None - if start and not mapper.single: - allconds.append( - visitors.cloned_traverse( - mapper.inherit_condition, - {}, - {"binary": visit_binary}, - ) - ) + allconds[0] = visitors.cloned_traverse( + allconds[0], {}, {"binary": visit_binary} + ) except _OptGetColumnsNotAvailable: return None diff --git a/test/orm/inheritance/test_basic.py b/test/orm/inheritance/test_basic.py index ac1661fdd16..446a9d9bd92 100644 --- a/test/orm/inheritance/test_basic.py +++ b/test/orm/inheritance/test_basic.py @@ -2696,6 +2696,70 @@ def _key_fallback(self, key, raiseerr): eq_(s1.sub, "s1sub") + def test_optimized_get_blank_intermediary(self, registry, connection): + """test #7507""" + + Base = registry.generate_base() + + class A(Base): + __tablename__ = "a" + + id = Column(Integer, primary_key=True) + a = Column(String(20), nullable=False) + type_ = Column(String(20)) + __mapper_args__ = { + "polymorphic_on": type_, + "polymorphic_identity": "a", + } + + class B(A): + __tablename__ = "b" + __mapper_args__ = {"polymorphic_identity": "b"} + + id = Column(Integer, ForeignKey("a.id"), primary_key=True) + b = Column(String(20), nullable=False) + + class C(B): + __tablename__ = "c" + __mapper_args__ = {"polymorphic_identity": "c"} + + id = Column(Integer, ForeignKey("b.id"), primary_key=True) + + class D(C): + __tablename__ = "d" + __mapper_args__ = {"polymorphic_identity": "d"} + + id = Column(Integer, ForeignKey("c.id"), primary_key=True) + c = Column(String(20), nullable=False) + + Base.metadata.create_all(connection) + + session = Session(connection) + session.add(D(a="x", b="y", c="z")) + session.commit() + + with self.sql_execution_asserter(connection) as asserter: + d = session.query(A).one() + eq_(d.c, "z") + asserter.assert_( + CompiledSQL( + "SELECT a.id AS a_id, a.a AS 
a_a, a.type_ AS a_type_ FROM a", + [], + ), + Or( + CompiledSQL( + "SELECT d.c AS d_c, b.b AS b_b FROM d, b, c WHERE " + ":param_1 = b.id AND b.id = c.id AND c.id = d.id", + [{"param_1": 1}], + ), + CompiledSQL( + "SELECT b.b AS b_b, d.c AS d_c FROM b, d, c WHERE " + ":param_1 = b.id AND b.id = c.id AND c.id = d.id", + [{"param_1": 1}], + ), + ), + ) + def test_optimized_passes(self): """ "test that the 'optimized load' routine doesn't crash when a column in the join condition is not available.""" From dd8e33d84f9d0449c14e51a4e3ffdefdd470f961 Mon Sep 17 00:00:00 2001 From: Gord Thompson Date: Mon, 20 Dec 2021 14:37:13 -0700 Subject: [PATCH 074/632] Reflect included columns as dialect_options Fixed reflection of covering indexes to report ``include_columns`` as part of the ``dialect_options`` entry in the reflected index dictionary, thereby enabling round trips from reflection->create to be complete. Included columns continue to also be present under the ``include_columns`` key for backwards compatibility. Fixes: #7382 Change-Id: I4f16b65caed3a36d405481690a3a92432b5efd62 (cherry picked from commit 52e352a190af750c3c28a5390b2ad55b20d1b752) --- doc/build/changelog/unreleased_14/7382.rst | 9 +++++++++ lib/sqlalchemy/dialects/mssql/base.py | 6 ++++++ lib/sqlalchemy/dialects/postgresql/base.py | 6 ++++++ lib/sqlalchemy/testing/suite/test_reflection.py | 14 ++++++++++++++ test/dialect/postgresql/test_reflection.py | 7 +++++++ 5 files changed, 42 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7382.rst diff --git a/doc/build/changelog/unreleased_14/7382.rst b/doc/build/changelog/unreleased_14/7382.rst new file mode 100644 index 00000000000..db6ae453114 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7382.rst @@ -0,0 +1,9 @@ +.. 
change:: + :tags: bug, reflection, postgresql, mssql + :tickets: 7382 + + Fixed reflection of covering indexes to report ``include_columns`` as part + of the ``dialect_options`` entry in the reflected index dictionary, thereby + enabling round trips from reflection->create to be complete. Included + columns continue to also be present under the ``include_columns`` key for + backwards compatibility. diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index d6a35c93768..defff295284 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -3070,6 +3070,12 @@ def get_indexes(self, connection, tablename, dbname, owner, schema, **kw): indexes[row["index_id"]]["column_names"].append( row["name"] ) + for index_info in indexes.values(): + # NOTE: "root level" include_columns is legacy, now part of + # dialect_options (issue #7382) + index_info.setdefault("dialect_options", {})[ + "mssql_include" + ] = index_info["include_columns"] return list(indexes.values()) diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index e58e430e85d..4273ff474af 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -4277,6 +4277,8 @@ def get_indexes(self, connection, table_name, schema, **kw): "column_names": [idx["cols"][i] for i in idx["key"]], } if self.server_version_info >= (11, 0): + # NOTE: this is legacy, this is part of dialect_options now + # as of #7382 entry["include_columns"] = [idx["cols"][i] for i in idx["inc"]] if "duplicates_constraint" in idx: entry["duplicates_constraint"] = idx["duplicates_constraint"] @@ -4285,6 +4287,10 @@ def get_indexes(self, connection, table_name, schema, **kw): (idx["cols"][idx["key"][i]], value) for i, value in idx["sorting"].items() ) + if "include_columns" in entry: + entry.setdefault("dialect_options", {})[ + "postgresql_include" + ] = entry["include_columns"] if 
"options" in idx: entry.setdefault("dialect_options", {})[ "postgresql_with" diff --git a/lib/sqlalchemy/testing/suite/test_reflection.py b/lib/sqlalchemy/testing/suite/test_reflection.py index 6e6201de977..a1f2e3bc94b 100644 --- a/lib/sqlalchemy/testing/suite/test_reflection.py +++ b/lib/sqlalchemy/testing/suite/test_reflection.py @@ -1206,6 +1206,9 @@ def test_reflect_expression_based_indexes(self, metadata, connection): ] if testing.requires.index_reflects_included_columns.enabled: expected[0]["include_columns"] = [] + expected[0]["dialect_options"] = { + "%s_include" % connection.engine.name: [] + } with expect_warnings( "Skipped unsupported reflection of expression-based index t_idx" @@ -1238,10 +1241,21 @@ def test_reflect_covering_index(self, metadata, connection): "column_names": ["x"], "include_columns": ["y"], "unique": False, + "dialect_options": { + "%s_include" % connection.engine.name: ["y"] + }, } ], ) + t2 = Table("t", MetaData(), autoload_with=connection) + eq_( + list(t2.indexes)[0].dialect_options[connection.engine.name][ + "include" + ], + ["y"], + ) + def _type_round_trip(self, connection, metadata, *types): t = Table( "t", diff --git a/test/dialect/postgresql/test_reflection.py b/test/dialect/postgresql/test_reflection.py index fa90ec212fc..5ee11ccd8c9 100644 --- a/test/dialect/postgresql/test_reflection.py +++ b/test/dialect/postgresql/test_reflection.py @@ -183,6 +183,7 @@ def test_reflect_index(self, connection): "unique": False, "column_names": ["q"], "include_columns": [], + "dialect_options": {"postgresql_include": []}, } ], ) @@ -198,6 +199,7 @@ def test_reflect_index_from_partition(self, connection): { "column_names": ["q"], "include_columns": [], + "dialect_options": {"postgresql_include": []}, "name": mock.ANY, "unique": False, } @@ -1131,6 +1133,7 @@ def test_index_reflection_modified(self, metadata, connection): expected = [{"name": "idx1", "unique": False, "column_names": ["y"]}] if 
testing.requires.index_reflects_included_columns.enabled: expected[0]["include_columns"] = [] + expected[0]["dialect_options"] = {"postgresql_include": []} eq_(ind, expected) @@ -1163,6 +1166,7 @@ def test_index_reflection_with_storage_options(self, metadata, connection): ] if testing.requires.index_reflects_included_columns.enabled: expected[0]["include_columns"] = [] + expected[0]["dialect_options"]["postgresql_include"] = [] eq_(ind, expected) m = MetaData() @@ -1195,6 +1199,7 @@ def test_index_reflection_with_access_method(self, metadata, connection): ] if testing.requires.index_reflects_included_columns.enabled: expected[0]["include_columns"] = [] + expected[0]["dialect_options"]["postgresql_include"] = [] eq_(ind, expected) m = MetaData() t1 = Table("t", m, autoload_with=connection) @@ -1229,6 +1234,7 @@ def test_index_reflection_with_include(self, metadata, connection): "unique": False, "column_names": ["x"], "include_columns": ["name"], + "dialect_options": {"postgresql_include": ["name"]}, "name": "idx1", } ], @@ -1604,6 +1610,7 @@ def test_reflection_with_exclude_constraint(self, metadata, connection): ] if testing.requires.index_reflects_included_columns.enabled: expected[0]["include_columns"] = [] + expected[0]["dialect_options"]["postgresql_include"] = [] eq_(insp.get_indexes("t"), expected) From ead6c7f8aee866cd9b5c901b762725b1cb16e151 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Thu, 30 Dec 2021 23:29:55 +0100 Subject: [PATCH 075/632] Imrpove MySQL/MariaDB dialect initialization. Replace ``SHOW VARIABLES LIKE`` statement with equivalent ``SELECT @@variable`` in MySQL and MariaDB dialect initialization. This should avoid mutex contention caused by ``SHOW VARIABLES``, improving initialization performance. 
Change-Id: Id836ef534fcc1473c7aaf9270d08a4da9b8f62cf closes: #7518 (cherry picked from commit 5e06f0f579df45116696fed78d65abcccc1dc3e3) --- doc/build/changelog/unreleased_14/7518.rst | 8 ++++++ lib/sqlalchemy/dialects/mysql/base.py | 19 ++++++-------- lib/sqlalchemy/dialects/mysql/pyodbc.py | 29 +++++++++------------- test/dialect/mysql/test_dialect.py | 2 +- 4 files changed, 29 insertions(+), 29 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7518.rst diff --git a/doc/build/changelog/unreleased_14/7518.rst b/doc/build/changelog/unreleased_14/7518.rst new file mode 100644 index 00000000000..6264297cb1f --- /dev/null +++ b/doc/build/changelog/unreleased_14/7518.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: change, mysql + :tickets: 7518 + + Replace ``SHOW VARIABLES LIKE`` statement with equivalent + ``SELECT @@variable`` in MySQL and MariaDB dialect initialization. + This should avoid mutex contention caused by ``SHOW VARIABLES``, + improving initialization performance. diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index 91356500f7f..6d9c22e9628 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -3115,24 +3115,21 @@ def _detect_casing(self, connection): # https://dev.mysql.com/doc/refman/en/identifier-case-sensitivity.html charset = self._connection_charset - show_var = connection.execute( - sql.text("SHOW VARIABLES LIKE 'lower_case_table_names'") - ) - row = self._compat_first( - show_var, - charset=charset, + show_var = connection.exec_driver_sql( + "SELECT @@lower_case_table_names" ) + row = self._compat_first(show_var, charset=charset) if not row: cs = 0 else: # 4.0.15 returns OFF or ON according to [ticket:489] # 3.23 doesn't, 4.0.27 doesn't.. 
- if row[1] == "OFF": + if row[0] == "OFF": cs = 0 - elif row[1] == "ON": + elif row[0] == "ON": cs = 1 else: - cs = int(row[1]) + cs = int(row[0]) self._casing = cs return cs @@ -3151,7 +3148,7 @@ def _detect_collations(self, connection): def _detect_sql_mode(self, connection): row = self._compat_first( - connection.exec_driver_sql("SHOW VARIABLES LIKE 'sql_mode'"), + connection.exec_driver_sql("SELECT @@sql_mode"), charset=self._connection_charset, ) @@ -3162,7 +3159,7 @@ def _detect_sql_mode(self, connection): ) self._sql_mode = "" else: - self._sql_mode = row[1] or "" + self._sql_mode = row[0] or "" def _detect_ansiquotes(self, connection): """Detect and adjust for the ANSI_QUOTES sql mode.""" diff --git a/lib/sqlalchemy/dialects/mysql/pyodbc.py b/lib/sqlalchemy/dialects/mysql/pyodbc.py index 69cc6487d15..9cae7b71452 100644 --- a/lib/sqlalchemy/dialects/mysql/pyodbc.py +++ b/lib/sqlalchemy/dialects/mysql/pyodbc.py @@ -43,11 +43,11 @@ """ # noqa import re -import sys from .base import MySQLDialect from .base import MySQLExecutionContext from .types import TIME +from ... import exc from ... import util from ...connectors.pyodbc import PyODBCConnector from ...sql.sqltypes import Time @@ -88,13 +88,14 @@ def _detect_charset(self, connection): # # If it's decided that issuing that sort of SQL leaves you SOL, then # this can prefer the driver value. - rs = connection.exec_driver_sql( - "SHOW VARIABLES LIKE 'character_set%%'" - ) - opts = {row[0]: row[1] for row in self._compat_fetchall(rs)} - for key in ("character_set_connection", "character_set"): - if opts.get(key, None): - return opts[key] + try: + value = connection.exec_driver_sql( + "select @@character_set_client" + ).scalar() + if value: + return value + except exc.DBAPIError: + pass util.warn( "Could not detect the connection character set. 
" @@ -121,15 +122,9 @@ def on_connect(conn): # https://github.com/mkleehammer/pyodbc/wiki/Unicode pyodbc_SQL_CHAR = 1 # pyodbc.SQL_CHAR pyodbc_SQL_WCHAR = -8 # pyodbc.SQL_WCHAR - if sys.version_info.major > 2: - conn.setdecoding(pyodbc_SQL_CHAR, encoding="utf-8") - conn.setdecoding(pyodbc_SQL_WCHAR, encoding="utf-8") - conn.setencoding(encoding="utf-8") - else: - conn.setdecoding(pyodbc_SQL_CHAR, encoding="utf-8") - conn.setdecoding(pyodbc_SQL_WCHAR, encoding="utf-8") - conn.setencoding(str, encoding="utf-8") - conn.setencoding(unicode, encoding="utf-8") # noqa: F821 + conn.setdecoding(pyodbc_SQL_CHAR, encoding="utf-8") + conn.setdecoding(pyodbc_SQL_WCHAR, encoding="utf-8") + conn.setencoding(encoding="utf-8") return on_connect diff --git a/test/dialect/mysql/test_dialect.py b/test/dialect/mysql/test_dialect.py index f314bd0af55..016014d5421 100644 --- a/test/dialect/mysql/test_dialect.py +++ b/test/dialect/mysql/test_dialect.py @@ -60,7 +60,7 @@ def test_no_show_variables(self): engine = engines.testing_engine() def my_execute(self, statement, *args, **kw): - if statement.startswith("SHOW VARIABLES"): + if statement.startswith("SELECT @@"): statement = "SELECT 1 FROM DUAL WHERE 1=0" return real_exec(self, statement, *args, **kw) From 6b646139bbff267d4f84943c982808fc33ea9cb9 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 3 Jan 2022 17:28:52 -0500 Subject: [PATCH 076/632] ensure correlate_except is checked for empty tuple Fixed issue where :meth:`_sql.Select.correlate_except` method, when passed either the ``None`` value or no arguments, would not correlate any elements when used in an ORM context (that is, passing ORM entities as FROM clauses), rather than causing all FROM elements to be considered as "correlated" in the same way which occurs when using Core-only constructs. 
Fixes: #7514 Change-Id: Ic4a5252c8f3c1140aba6c308264948f3a91f33f5 (cherry picked from commit 709239f4a61e88c2051dce87eb4058efd653697e) --- doc/build/changelog/unreleased_14/7514.rst | 9 +++++++ lib/sqlalchemy/orm/context.py | 4 +-- test/orm/test_core_compilation.py | 31 ++++++++++++++++++++++ test/sql/test_compiler.py | 8 ++++++ 4 files changed, 50 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7514.rst diff --git a/doc/build/changelog/unreleased_14/7514.rst b/doc/build/changelog/unreleased_14/7514.rst new file mode 100644 index 00000000000..bf6fd471eed --- /dev/null +++ b/doc/build/changelog/unreleased_14/7514.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, orm + :tickets: 7514 + + Fixed issue where :meth:`_sql.Select.correlate_except` method, when passed + either the ``None`` value or no arguments, would not correlate any elements + when used in an ORM context (that is, passing ORM entities as FROM + clauses), rather than causing all FROM elements to be considered as + "correlated" in the same way which occurs when using Core-only constructs. 
diff --git a/lib/sqlalchemy/orm/context.py b/lib/sqlalchemy/orm/context.py index 130fe67af62..edbe45ac210 100644 --- a/lib/sqlalchemy/orm/context.py +++ b/lib/sqlalchemy/orm/context.py @@ -787,7 +787,7 @@ def _setup_for_generate(self): for s in query._correlate ) ) - elif query._correlate_except: + elif query._correlate_except is not None: self.correlate_except = tuple( util.flatten_iterator( sql_util.surface_selectables(s) if s is not None else None @@ -1229,7 +1229,7 @@ def _select_statement( if correlate: statement.correlate.non_generative(statement, *correlate) - if correlate_except: + if correlate_except is not None: statement.correlate_except.non_generative( statement, *correlate_except ) diff --git a/test/orm/test_core_compilation.py b/test/orm/test_core_compilation.py index 5d66e339ab1..7948c016ba2 100644 --- a/test/orm/test_core_compilation.py +++ b/test/orm/test_core_compilation.py @@ -1,8 +1,10 @@ from sqlalchemy import bindparam +from sqlalchemy import Column from sqlalchemy import exc from sqlalchemy import func from sqlalchemy import insert from sqlalchemy import inspect +from sqlalchemy import Integer from sqlalchemy import literal_column from sqlalchemy import null from sqlalchemy import or_ @@ -31,12 +33,15 @@ from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import eq_ +from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing.assertions import expect_raises_message from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.util import resolve_lambda +from sqlalchemy.util.langhelpers import hybridproperty from .inheritance import _poly_fixtures from .test_query import QueryTest +from ..sql.test_compiler import CorrelateTest as _CoreCorrelateTest # TODO: # composites / unions, etc. 
@@ -2320,3 +2325,29 @@ class Foo(object): ) self.assert_compile(stmt1, expected) self.assert_compile(stmt2, expected) + + +class CorrelateTest(fixtures.DeclarativeMappedTest, _CoreCorrelateTest): + @classmethod + def setup_classes(cls): + Base = cls.DeclarativeBasic + + class T1(Base): + __tablename__ = "t1" + a = Column(Integer, primary_key=True) + + @hybridproperty + def c(self): + return self + + class T2(Base): + __tablename__ = "t2" + a = Column(Integer, primary_key=True) + + @hybridproperty + def c(self): + return self + + def _fixture(self): + t1, t2 = self.classes("T1", "T2") + return t1, t2, select(t1).where(t1.c.a == t2.c.a) diff --git a/test/sql/test_compiler.py b/test/sql/test_compiler.py index eeb102162d6..50fd582b7d6 100644 --- a/test/sql/test_compiler.py +++ b/test/sql/test_compiler.py @@ -5902,6 +5902,14 @@ def test_correlate_except_none(self, value): ) ) + + def test_correlate_except_empty(self): + t1, t2, s1 = self._fixture() + self._assert_where_all_correlated( + select(t1, t2).where( + t2.c.a == s1.correlate_except().scalar_subquery() + ) + ) + def test_correlate_except_having(self): t1, t2, s1 = self._fixture() self._assert_having_correlated( From 38ca8ed88b90f5f5a35853babaa62012a09e06ac Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 4 Jan 2022 14:08:10 -0500 Subject: [PATCH 077/632] improve custom operator for SQL types docs introduce here that "custom ops" first come from the .op() method in the usual case. then only if one wants such an op to be pre-associated with particular types, then the comparator may be used. Also clarify the individual points regarding the comparator. 
Change-Id: Id6046448eb2c17fa6e3f2ef6d9343b156ddec96f (cherry picked from commit 680264970a4c9199c6e1dc91f855bdb08a90b4cc) --- doc/build/core/custom_types.rst | 68 +++++++++++++++++++++++---------- 1 file changed, 47 insertions(+), 21 deletions(-) diff --git a/doc/build/core/custom_types.rst b/doc/build/core/custom_types.rst index 1b6a91be186..5f9e0555d63 100644 --- a/doc/build/core/custom_types.rst +++ b/doc/build/core/custom_types.rst @@ -492,13 +492,35 @@ explicit methods on column expressions, such as :meth:`.ColumnOperators.in_` (``table.c.value.in_(['x', 'y'])``) and :meth:`.ColumnOperators.like` (``table.c.value.like('%ed%')``). -The Core expression constructs in all cases consult the type of the expression in order to determine -the behavior of existing operators, as well as to locate additional operators that aren't part of -the built-in set. The :class:`.TypeEngine` base class defines a root "comparison" implementation -:class:`.TypeEngine.Comparator`, and many specific types provide their own sub-implementations of this -class. User-defined :class:`.TypeEngine.Comparator` implementations can be built directly into a -simple subclass of a particular type in order to override or define new operations. 
Below, -we create a :class:`.Integer` subclass which overrides the :meth:`.ColumnOperators.__add__` operator:: +When the need arises for a SQL operator that isn't directly supported by the +already supplied methods above, the most expedient way to produce this operator is +to use the :meth:`_sql.Operators.op` method on any SQL expression object; this method +is given a string representing the SQL operator to render, and the return value +is a Python callable that accepts any arbitrary right-hand side expression:: + + >>> from sqlalchemy import column + >>> expr = column('x').op('>>')(column('y')) + >>> print(expr) + x >> y + +When making use of custom SQL types, there is also a means of implementing +custom operators as above that are automatically present upon any column +expression that makes use of that column type, without the need to directly +call :meth:`_sql.Operators.op` each time the operator is to be used. + +To achieve this, a SQL +expression construct consults the :class:`_types.TypeEngine` object associated +with the construct in order to determine the behavior of the built-in +operators as well as to look for new methods that may have been invoked. +:class:`.TypeEngine` defines a +"comparison" object implemented by the :class:`.TypeEngine.Comparator` class to provide the base +behavior for SQL operators, and many specific types provide their own +sub-implementations of this class. User-defined :class:`.TypeEngine.Comparator` +implementations can be built directly into a simple subclass of a particular +type in order to override or define new operations. 
Below, we create a +:class:`.Integer` subclass which overrides the :meth:`.ColumnOperators.__add__` +operator, which in turn uses :meth:`_sql.Operators.op` to produce the custom +SQL itself:: from sqlalchemy import Integer @@ -520,26 +542,21 @@ Usage:: The implementation for :meth:`.ColumnOperators.__add__` is consulted by an owning SQL expression, by instantiating the :class:`.TypeEngine.Comparator` with -itself as the ``expr`` attribute. The mechanics of the expression -system are such that operations continue recursively until an -expression object produces a new SQL expression construct. Above, we -could just as well have said ``self.expr.op("goofy")(other)`` instead -of ``self.op("goofy")(other)``. +itself as the ``expr`` attribute. This attribute may be used when the +implementation needs to refer to the originating :class:`_sql.ColumnElement` +object directly:: -When using :meth:`.Operators.op` for comparison operations that return a -boolean result, the :paramref:`.Operators.op.is_comparison` flag should be -set to ``True``:: + from sqlalchemy import Integer class MyInt(Integer): class comparator_factory(Integer.Comparator): - def is_frobnozzled(self, other): - return self.op("--is_frobnozzled->", is_comparison=True)(other) + def __add__(self, other): + return func.special_addition(self.expr, other) New methods added to a :class:`.TypeEngine.Comparator` are exposed on an -owning SQL expression -using a ``__getattr__`` scheme, which exposes methods added to -:class:`.TypeEngine.Comparator` onto the owning :class:`_expression.ColumnElement`. -For example, to add a ``log()`` function +owning SQL expression object using a dynamic lookup scheme, which exposes methods added to +:class:`.TypeEngine.Comparator` onto the owning :class:`_expression.ColumnElement` +expression construct. 
For example, to add a ``log()`` function to integers:: from sqlalchemy import Integer, func @@ -554,6 +571,15 @@ Using the above type:: >>> print(sometable.c.data.log(5)) log(:log_1, :log_2) +When using :meth:`.Operators.op` for comparison operations that return a +boolean result, the :paramref:`.Operators.op.is_comparison` flag should be +set to ``True``:: + + class MyInt(Integer): + class comparator_factory(Integer.Comparator): + def is_frobnozzled(self, other): + return self.op("--is_frobnozzled->", is_comparison=True)(other) + Unary operations are also possible. For example, to add an implementation of the PostgreSQL factorial operator, we combine the :class:`.UnaryExpression` construct From e916d00fd7c28424a22d424202643d258acb23d7 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Fri, 10 Dec 2021 14:18:34 +0100 Subject: [PATCH 078/632] Improve array of enum handling. Fixed handling of array of enum values which require escape characters. Fixes: #7418 Change-Id: I50525846f6029dfea9a8ad1cb913424d168d5f62 (cherry picked from commit 94afc4f5fc842160468cf7175552125eebf7a510) --- doc/build/changelog/unreleased_14/7418.rst | 5 ++++ lib/sqlalchemy/dialects/postgresql/array.py | 29 +++++++++++++++++++-- test/dialect/postgresql/test_types.py | 29 +++++++++++++++++++++ 3 files changed, 61 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7418.rst diff --git a/doc/build/changelog/unreleased_14/7418.rst b/doc/build/changelog/unreleased_14/7418.rst new file mode 100644 index 00000000000..e1e192571d7 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7418.rst @@ -0,0 +1,5 @@ +.. change:: + :tags: bug, postgresql + :tickets: 7418 + + Fixed handling of array of enum values which require escape characters. 
diff --git a/lib/sqlalchemy/dialects/postgresql/array.py b/lib/sqlalchemy/dialects/postgresql/array.py index e57a4fc9acc..4f296e8ef13 100644 --- a/lib/sqlalchemy/dialects/postgresql/array.py +++ b/lib/sqlalchemy/dialects/postgresql/array.py @@ -367,10 +367,11 @@ def process(value): if self._against_native_enum: super_rp = process + pattern = re.compile(r"^{(.*)}$") def handle_raw_string(value): - inner = re.match(r"^{(.*)}$", value).group(1) - return inner.split(",") if inner else [] + inner = pattern.match(value).group(1) + return _split_enum_values(inner) def process(value): if value is None: @@ -385,3 +386,27 @@ def process(value): ) return process + + +def _split_enum_values(array_string): + if '"' not in array_string: + # no escape char is present so it can just split on the comma + return array_string.split(",") + + # handles quoted strings from: + # r'abc,"quoted","also\\\\quoted", "quoted, comma", "esc \" quot", qpr' + # returns + # ['abc', 'quoted', 'also\\quoted', 'quoted, comma', 'esc " quot', 'qpr'] + text = array_string.replace(r"\"", "_$ESC_QUOTE$_") + text = text.replace(r"\\", "\\") + result = [] + on_quotes = re.split(r'(")', text) + in_quotes = False + for tok in on_quotes: + if tok == '"': + in_quotes = not in_quotes + elif in_quotes: + result.append(tok.replace("_$ESC_QUOTE$_", '"')) + else: + result.extend(re.findall(r"([^\s,]+),?", tok)) + return result diff --git a/test/dialect/postgresql/test_types.py b/test/dialect/postgresql/test_types.py index 4f26a6ef661..bbd5cadda12 100644 --- a/test/dialect/postgresql/test_types.py +++ b/test/dialect/postgresql/test_types.py @@ -1954,6 +1954,23 @@ def __eq__(self, other): def __ne__(self, other): return not self.__eq__(other) + difficult_enum = [ + "Value", + "With space", + "With,comma", + 'With"quote', + "With\\escape", + """Various!@#$%^*()"'\\][{};:.<>|_+~chars""", + ] + + def make_difficult_enum(cls_, native): + return cls_( + *difficult_enum, name="difficult_enum", native_enum=native + ) + + def 
difficult_enum_values(x): + return [v for i, v in enumerate(difficult_enum) if i != x - 1] + elements = [ (sqltypes.Integer, lambda x: [1, x, 3, 4, 5]), (sqltypes.Text, str_values), @@ -2041,6 +2058,18 @@ def __ne__(self, other): (sqltypes.Enum(AnEnum, native_enum=True), enum_values), (sqltypes.Enum(AnEnum, native_enum=False), enum_values), (postgresql.ENUM(AnEnum, native_enum=True), enum_values), + ( + make_difficult_enum(sqltypes.Enum, native=True), + difficult_enum_values, + ), + ( + make_difficult_enum(sqltypes.Enum, native=False), + difficult_enum_values, + ), + ( + make_difficult_enum(postgresql.ENUM, native=True), + difficult_enum_values, + ), ] if not exclude_json: From 4e61d03dd462877c9ead514cef957bc63a52702b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 4 Jan 2022 15:38:15 -0500 Subject: [PATCH 079/632] update stale documentation on Mapper class don't get into details here that have changed, point to the main narrative documentation instead. Change-Id: I5bc0829f6ff282ca104f18deb9ec9857b6e1c8ac (cherry picked from commit 9298ce03e1181d5bc00c1891663433606ead1223) --- lib/sqlalchemy/orm/mapper.py | 33 ++++++--------------------------- 1 file changed, 6 insertions(+), 27 deletions(-) diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index 9ac18bea664..af63c523998 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -91,35 +91,14 @@ class Mapper( sql_base.MemoizedHasCacheKey, InspectionAttr, ): - """Define the correlation of class attributes to database table - columns. + """Defines an association between a Python class and a database table or + other relational structure, so that ORM operations against the class may + proceed. - The :class:`_orm.Mapper` object is instantiated using the - :func:`~sqlalchemy.orm.mapper` function. For information + The :class:`_orm.Mapper` object is instantiated using mapping methods + present on the :class:`_orm.registry` object. 
For information about instantiating new :class:`_orm.Mapper` objects, see - that function's documentation. - - - When :func:`.mapper` is used - explicitly to link a user defined class with table - metadata, this is referred to as *classical mapping*. - Modern SQLAlchemy usage tends to favor the - :mod:`sqlalchemy.ext.declarative` extension for class - configuration, which - makes usage of :func:`.mapper` behind the scenes. - - Given a particular class known to be mapped by the ORM, - the :class:`_orm.Mapper` which maintains it can be acquired - using the :func:`_sa.inspect` function:: - - from sqlalchemy import inspect - - mapper = inspect(MyClass) - - A class which was mapped by the :mod:`sqlalchemy.ext.declarative` - extension will also have its mapper available via the ``__mapper__`` - attribute. - + :ref:`orm_mapping_classes_toplevel`. """ From f96e24013c80d933cb8171061be3d316215fe585 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 6 Jan 2022 09:24:08 -0500 Subject: [PATCH 080/632] fix incorrect with_parent() example The lead example for the with_parent() function docstring was backwards, based on the standard User/Address mapping used in the documentation. 
Fixes: #7540 Change-Id: Iaff7dc6fdd0c323509231ae5f3122ed76a420915 (cherry picked from commit 21ee595ba9ef3e7abc8982fac7bf488c904cf9c9) --- lib/sqlalchemy/orm/util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index fba483f8904..ae190ec1c14 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -1837,7 +1837,7 @@ def with_parent(instance, prop, from_entity=None): E.g.:: - stmt = select(Address).where(with_parent(some_user, Address.user)) + stmt = select(Address).where(with_parent(some_user, User.addresses)) The SQL rendered is the same as that rendered when a lazy loader From 2f370229d63665ffae72a9f8a37800b4d70cf6a1 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 6 Jan 2022 15:59:47 -0500 Subject: [PATCH 081/632] happy new year 2022 Change-Id: Ic38dbc640aa0fe8a784a5b5e57c45a41eb0ea01b --- LICENSE | 2 +- doc/build/conf.py | 2 +- doc/build/copyright.rst | 2 +- lib/sqlalchemy/__init__.py | 2 +- lib/sqlalchemy/cextension/immutabledict.c | 2 +- lib/sqlalchemy/cextension/processors.c | 2 +- lib/sqlalchemy/cextension/resultproxy.c | 2 +- lib/sqlalchemy/connectors/__init__.py | 2 +- lib/sqlalchemy/connectors/mxodbc.py | 2 +- lib/sqlalchemy/connectors/pyodbc.py | 2 +- lib/sqlalchemy/databases/__init__.py | 2 +- lib/sqlalchemy/dialects/__init__.py | 2 +- lib/sqlalchemy/dialects/firebird/__init__.py | 2 +- lib/sqlalchemy/dialects/firebird/base.py | 2 +- lib/sqlalchemy/dialects/firebird/fdb.py | 2 +- lib/sqlalchemy/dialects/firebird/kinterbasdb.py | 2 +- lib/sqlalchemy/dialects/mssql/__init__.py | 2 +- lib/sqlalchemy/dialects/mssql/base.py | 2 +- lib/sqlalchemy/dialects/mssql/information_schema.py | 2 +- lib/sqlalchemy/dialects/mssql/mxodbc.py | 2 +- lib/sqlalchemy/dialects/mssql/pymssql.py | 2 +- lib/sqlalchemy/dialects/mssql/pyodbc.py | 2 +- lib/sqlalchemy/dialects/mysql/__init__.py | 2 +- lib/sqlalchemy/dialects/mysql/aiomysql.py | 2 +- 
lib/sqlalchemy/dialects/mysql/asyncmy.py | 2 +- lib/sqlalchemy/dialects/mysql/base.py | 2 +- lib/sqlalchemy/dialects/mysql/cymysql.py | 2 +- lib/sqlalchemy/dialects/mysql/enumerated.py | 2 +- lib/sqlalchemy/dialects/mysql/json.py | 2 +- lib/sqlalchemy/dialects/mysql/mariadbconnector.py | 2 +- lib/sqlalchemy/dialects/mysql/mysqlconnector.py | 2 +- lib/sqlalchemy/dialects/mysql/mysqldb.py | 2 +- lib/sqlalchemy/dialects/mysql/oursql.py | 2 +- lib/sqlalchemy/dialects/mysql/pymysql.py | 2 +- lib/sqlalchemy/dialects/mysql/pyodbc.py | 2 +- lib/sqlalchemy/dialects/mysql/reflection.py | 2 +- lib/sqlalchemy/dialects/mysql/reserved_words.py | 2 +- lib/sqlalchemy/dialects/mysql/types.py | 2 +- lib/sqlalchemy/dialects/oracle/__init__.py | 2 +- lib/sqlalchemy/dialects/oracle/base.py | 2 +- lib/sqlalchemy/dialects/oracle/cx_oracle.py | 2 +- lib/sqlalchemy/dialects/postgresql/__init__.py | 2 +- lib/sqlalchemy/dialects/postgresql/array.py | 2 +- lib/sqlalchemy/dialects/postgresql/asyncpg.py | 2 +- lib/sqlalchemy/dialects/postgresql/base.py | 2 +- lib/sqlalchemy/dialects/postgresql/dml.py | 2 +- lib/sqlalchemy/dialects/postgresql/ext.py | 2 +- lib/sqlalchemy/dialects/postgresql/hstore.py | 2 +- lib/sqlalchemy/dialects/postgresql/json.py | 2 +- lib/sqlalchemy/dialects/postgresql/pg8000.py | 2 +- lib/sqlalchemy/dialects/postgresql/psycopg2.py | 2 +- lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py | 2 +- lib/sqlalchemy/dialects/postgresql/pygresql.py | 2 +- lib/sqlalchemy/dialects/postgresql/pypostgresql.py | 2 +- lib/sqlalchemy/dialects/postgresql/ranges.py | 2 +- lib/sqlalchemy/dialects/sqlite/__init__.py | 2 +- lib/sqlalchemy/dialects/sqlite/aiosqlite.py | 2 +- lib/sqlalchemy/dialects/sqlite/base.py | 2 +- lib/sqlalchemy/dialects/sqlite/dml.py | 2 +- lib/sqlalchemy/dialects/sqlite/pysqlcipher.py | 2 +- lib/sqlalchemy/dialects/sqlite/pysqlite.py | 2 +- lib/sqlalchemy/dialects/sybase/__init__.py | 2 +- lib/sqlalchemy/dialects/sybase/base.py | 2 +- 
lib/sqlalchemy/dialects/sybase/mxodbc.py | 2 +- lib/sqlalchemy/dialects/sybase/pyodbc.py | 2 +- lib/sqlalchemy/dialects/sybase/pysybase.py | 2 +- lib/sqlalchemy/engine/__init__.py | 2 +- lib/sqlalchemy/engine/base.py | 2 +- lib/sqlalchemy/engine/create.py | 2 +- lib/sqlalchemy/engine/cursor.py | 2 +- lib/sqlalchemy/engine/default.py | 2 +- lib/sqlalchemy/engine/events.py | 2 +- lib/sqlalchemy/engine/interfaces.py | 2 +- lib/sqlalchemy/engine/mock.py | 2 +- lib/sqlalchemy/engine/reflection.py | 2 +- lib/sqlalchemy/engine/result.py | 2 +- lib/sqlalchemy/engine/row.py | 2 +- lib/sqlalchemy/engine/strategies.py | 2 +- lib/sqlalchemy/engine/url.py | 2 +- lib/sqlalchemy/engine/util.py | 2 +- lib/sqlalchemy/event/__init__.py | 2 +- lib/sqlalchemy/event/api.py | 2 +- lib/sqlalchemy/event/attr.py | 2 +- lib/sqlalchemy/event/base.py | 2 +- lib/sqlalchemy/event/legacy.py | 2 +- lib/sqlalchemy/event/registry.py | 2 +- lib/sqlalchemy/events.py | 2 +- lib/sqlalchemy/exc.py | 2 +- lib/sqlalchemy/ext/__init__.py | 2 +- lib/sqlalchemy/ext/associationproxy.py | 2 +- lib/sqlalchemy/ext/asyncio/__init__.py | 2 +- lib/sqlalchemy/ext/asyncio/engine.py | 2 +- lib/sqlalchemy/ext/asyncio/events.py | 2 +- lib/sqlalchemy/ext/asyncio/exc.py | 2 +- lib/sqlalchemy/ext/asyncio/result.py | 2 +- lib/sqlalchemy/ext/asyncio/scoping.py | 2 +- lib/sqlalchemy/ext/asyncio/session.py | 2 +- lib/sqlalchemy/ext/automap.py | 2 +- lib/sqlalchemy/ext/baked.py | 2 +- lib/sqlalchemy/ext/compiler.py | 2 +- lib/sqlalchemy/ext/declarative/__init__.py | 2 +- lib/sqlalchemy/ext/declarative/extensions.py | 2 +- lib/sqlalchemy/ext/horizontal_shard.py | 2 +- lib/sqlalchemy/ext/hybrid.py | 2 +- lib/sqlalchemy/ext/indexable.py | 2 +- lib/sqlalchemy/ext/mutable.py | 2 +- lib/sqlalchemy/ext/orderinglist.py | 2 +- lib/sqlalchemy/ext/serializer.py | 2 +- lib/sqlalchemy/future/__init__.py | 2 +- lib/sqlalchemy/future/orm/__init__.py | 2 +- lib/sqlalchemy/inspection.py | 2 +- lib/sqlalchemy/log.py | 2 +- 
lib/sqlalchemy/orm/__init__.py | 2 +- lib/sqlalchemy/orm/attributes.py | 2 +- lib/sqlalchemy/orm/base.py | 2 +- lib/sqlalchemy/orm/clsregistry.py | 2 +- lib/sqlalchemy/orm/collections.py | 2 +- lib/sqlalchemy/orm/context.py | 2 +- lib/sqlalchemy/orm/decl_api.py | 2 +- lib/sqlalchemy/orm/decl_base.py | 2 +- lib/sqlalchemy/orm/dependency.py | 2 +- lib/sqlalchemy/orm/descriptor_props.py | 2 +- lib/sqlalchemy/orm/dynamic.py | 2 +- lib/sqlalchemy/orm/evaluator.py | 2 +- lib/sqlalchemy/orm/events.py | 2 +- lib/sqlalchemy/orm/exc.py | 2 +- lib/sqlalchemy/orm/identity.py | 2 +- lib/sqlalchemy/orm/instrumentation.py | 2 +- lib/sqlalchemy/orm/interfaces.py | 2 +- lib/sqlalchemy/orm/loading.py | 2 +- lib/sqlalchemy/orm/mapper.py | 2 +- lib/sqlalchemy/orm/path_registry.py | 2 +- lib/sqlalchemy/orm/persistence.py | 2 +- lib/sqlalchemy/orm/properties.py | 2 +- lib/sqlalchemy/orm/query.py | 2 +- lib/sqlalchemy/orm/relationships.py | 2 +- lib/sqlalchemy/orm/scoping.py | 2 +- lib/sqlalchemy/orm/session.py | 2 +- lib/sqlalchemy/orm/state.py | 2 +- lib/sqlalchemy/orm/strategies.py | 2 +- lib/sqlalchemy/orm/strategy_options.py | 2 +- lib/sqlalchemy/orm/sync.py | 2 +- lib/sqlalchemy/orm/unitofwork.py | 2 +- lib/sqlalchemy/orm/util.py | 2 +- lib/sqlalchemy/pool/__init__.py | 2 +- lib/sqlalchemy/pool/base.py | 2 +- lib/sqlalchemy/pool/dbapi_proxy.py | 2 +- lib/sqlalchemy/pool/events.py | 2 +- lib/sqlalchemy/pool/impl.py | 2 +- lib/sqlalchemy/processors.py | 2 +- lib/sqlalchemy/schema.py | 2 +- lib/sqlalchemy/sql/__init__.py | 2 +- lib/sqlalchemy/sql/annotation.py | 2 +- lib/sqlalchemy/sql/base.py | 2 +- lib/sqlalchemy/sql/coercions.py | 2 +- lib/sqlalchemy/sql/compiler.py | 2 +- lib/sqlalchemy/sql/crud.py | 2 +- lib/sqlalchemy/sql/ddl.py | 2 +- lib/sqlalchemy/sql/default_comparator.py | 2 +- lib/sqlalchemy/sql/dml.py | 2 +- lib/sqlalchemy/sql/elements.py | 2 +- lib/sqlalchemy/sql/events.py | 2 +- lib/sqlalchemy/sql/expression.py | 2 +- lib/sqlalchemy/sql/functions.py | 2 +- 
lib/sqlalchemy/sql/lambdas.py | 2 +- lib/sqlalchemy/sql/naming.py | 2 +- lib/sqlalchemy/sql/operators.py | 2 +- lib/sqlalchemy/sql/roles.py | 2 +- lib/sqlalchemy/sql/schema.py | 2 +- lib/sqlalchemy/sql/selectable.py | 2 +- lib/sqlalchemy/sql/sqltypes.py | 2 +- lib/sqlalchemy/sql/type_api.py | 2 +- lib/sqlalchemy/sql/util.py | 2 +- lib/sqlalchemy/sql/visitors.py | 2 +- lib/sqlalchemy/testing/__init__.py | 2 +- lib/sqlalchemy/testing/assertions.py | 2 +- lib/sqlalchemy/testing/assertsql.py | 2 +- lib/sqlalchemy/testing/asyncio.py | 2 +- lib/sqlalchemy/testing/config.py | 2 +- lib/sqlalchemy/testing/engines.py | 2 +- lib/sqlalchemy/testing/entities.py | 2 +- lib/sqlalchemy/testing/exclusions.py | 2 +- lib/sqlalchemy/testing/fixtures.py | 2 +- lib/sqlalchemy/testing/mock.py | 2 +- lib/sqlalchemy/testing/pickleable.py | 2 +- lib/sqlalchemy/testing/plugin/plugin_base.py | 2 +- lib/sqlalchemy/testing/profiling.py | 2 +- lib/sqlalchemy/testing/requirements.py | 2 +- lib/sqlalchemy/testing/schema.py | 2 +- lib/sqlalchemy/testing/util.py | 2 +- lib/sqlalchemy/testing/warnings.py | 2 +- lib/sqlalchemy/types.py | 2 +- lib/sqlalchemy/util/__init__.py | 2 +- lib/sqlalchemy/util/_collections.py | 2 +- lib/sqlalchemy/util/_compat_py3k.py | 2 +- lib/sqlalchemy/util/_concurrency_py3k.py | 2 +- lib/sqlalchemy/util/_preloaded.py | 2 +- lib/sqlalchemy/util/compat.py | 2 +- lib/sqlalchemy/util/concurrency.py | 2 +- lib/sqlalchemy/util/deprecations.py | 2 +- lib/sqlalchemy/util/langhelpers.py | 2 +- lib/sqlalchemy/util/queue.py | 2 +- lib/sqlalchemy/util/topological.py | 2 +- 203 files changed, 203 insertions(+), 203 deletions(-) diff --git a/LICENSE b/LICENSE index 0d9fb6dc4b1..c933e4b1ae0 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright 2005-2021 SQLAlchemy authors and contributors . +Copyright 2005-2022 SQLAlchemy authors and contributors . 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/doc/build/conf.py b/doc/build/conf.py index 74b5a8014f2..2db20708000 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -196,7 +196,7 @@ # General information about the project. project = u"SQLAlchemy" -copyright = u"2007-2021, the SQLAlchemy authors and contributors" # noqa +copyright = u"2007-2022, the SQLAlchemy authors and contributors" # noqa # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/doc/build/copyright.rst b/doc/build/copyright.rst index b38d3ae2963..59df651312d 100644 --- a/doc/build/copyright.rst +++ b/doc/build/copyright.rst @@ -6,7 +6,7 @@ Appendix: Copyright This is the MIT license: ``_ -Copyright (c) 2005-2021 Michael Bayer and contributors. +Copyright (c) 2005-2022 Michael Bayer and contributors. SQLAlchemy is a trademark of Michael Bayer. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index f4241074e51..a860e7a04ee 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -1,5 +1,5 @@ # sqlalchemy/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/cextension/immutabledict.c b/lib/sqlalchemy/cextension/immutabledict.c index 1188dcd2baf..53e1ab34934 100644 --- a/lib/sqlalchemy/cextension/immutabledict.c +++ b/lib/sqlalchemy/cextension/immutabledict.c @@ -1,6 +1,6 @@ /* immuatbledict.c -Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +Copyright (C) 2005-2022 the SQLAlchemy authors and contributors This module is part of SQLAlchemy and is released under the MIT License: https://www.opensource.org/licenses/mit-license.php diff --git a/lib/sqlalchemy/cextension/processors.c b/lib/sqlalchemy/cextension/processors.c index f6f203e7499..afe4234b92c 100644 --- a/lib/sqlalchemy/cextension/processors.c +++ b/lib/sqlalchemy/cextension/processors.c @@ -1,6 +1,6 @@ /* processors.c -Copyright (C) 2010-2021 the SQLAlchemy authors and contributors +Copyright (C) 2010-2022 the SQLAlchemy authors and contributors Copyright (C) 2010-2011 Gaetan de Menten gdementen@gmail.com This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/cextension/resultproxy.c b/lib/sqlalchemy/cextension/resultproxy.c index 2de672f22b9..c071ff31731 100644 --- a/lib/sqlalchemy/cextension/resultproxy.c +++ b/lib/sqlalchemy/cextension/resultproxy.c @@ -1,6 +1,6 @@ /* resultproxy.c -Copyright (C) 2010-2021 the SQLAlchemy authors and contributors +Copyright (C) 2010-2022 the SQLAlchemy authors and contributors Copyright (C) 2010-2011 Gaetan de Menten gdementen@gmail.com This module is part of 
SQLAlchemy and is released under diff --git a/lib/sqlalchemy/connectors/__init__.py b/lib/sqlalchemy/connectors/__init__.py index fee8b3836f5..e738086e6fa 100644 --- a/lib/sqlalchemy/connectors/__init__.py +++ b/lib/sqlalchemy/connectors/__init__.py @@ -1,5 +1,5 @@ # connectors/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/connectors/mxodbc.py b/lib/sqlalchemy/connectors/mxodbc.py index 1c2fb00c043..89b348433f5 100644 --- a/lib/sqlalchemy/connectors/mxodbc.py +++ b/lib/sqlalchemy/connectors/mxodbc.py @@ -1,5 +1,5 @@ # connectors/mxodbc.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/connectors/pyodbc.py b/lib/sqlalchemy/connectors/pyodbc.py index c2bbdf7ce91..7a97aa16c78 100644 --- a/lib/sqlalchemy/connectors/pyodbc.py +++ b/lib/sqlalchemy/connectors/pyodbc.py @@ -1,5 +1,5 @@ # connectors/pyodbc.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/databases/__init__.py b/lib/sqlalchemy/databases/__init__.py index 01768042591..fa832298359 100644 --- a/lib/sqlalchemy/databases/__init__.py +++ b/lib/sqlalchemy/databases/__init__.py @@ -1,5 +1,5 @@ # databases/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/__init__.py b/lib/sqlalchemy/dialects/__init__.py index e06eb099524..84a9ad81fc4 100644 --- a/lib/sqlalchemy/dialects/__init__.py +++ 
b/lib/sqlalchemy/dialects/__init__.py @@ -1,5 +1,5 @@ # dialects/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/firebird/__init__.py b/lib/sqlalchemy/dialects/firebird/__init__.py index d4a054c3bf2..a34eecf9def 100644 --- a/lib/sqlalchemy/dialects/firebird/__init__.py +++ b/lib/sqlalchemy/dialects/firebird/__init__.py @@ -1,5 +1,5 @@ # firebird/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/firebird/base.py b/lib/sqlalchemy/dialects/firebird/base.py index 91e2c04a7eb..e2698b17817 100644 --- a/lib/sqlalchemy/dialects/firebird/base.py +++ b/lib/sqlalchemy/dialects/firebird/base.py @@ -1,5 +1,5 @@ # firebird/base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/firebird/fdb.py b/lib/sqlalchemy/dialects/firebird/fdb.py index 4687809dfef..38f443267c7 100644 --- a/lib/sqlalchemy/dialects/firebird/fdb.py +++ b/lib/sqlalchemy/dialects/firebird/fdb.py @@ -1,5 +1,5 @@ # firebird/fdb.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/firebird/kinterbasdb.py b/lib/sqlalchemy/dialects/firebird/kinterbasdb.py index 102222de0ac..b999404641f 100644 --- a/lib/sqlalchemy/dialects/firebird/kinterbasdb.py +++ b/lib/sqlalchemy/dialects/firebird/kinterbasdb.py @@ -1,5 +1,5 @@ # firebird/kinterbasdb.py -# Copyright (C) 2005-2021 the 
SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/__init__.py b/lib/sqlalchemy/dialects/mssql/__init__.py index 3aa1e344a6e..cae01682c81 100644 --- a/lib/sqlalchemy/dialects/mssql/__init__.py +++ b/lib/sqlalchemy/dialects/mssql/__init__.py @@ -1,5 +1,5 @@ # mssql/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index defff295284..9f15aa8e396 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -1,5 +1,5 @@ # mssql/base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/information_schema.py b/lib/sqlalchemy/dialects/mssql/information_schema.py index fa0386faad3..df914936899 100644 --- a/lib/sqlalchemy/dialects/mssql/information_schema.py +++ b/lib/sqlalchemy/dialects/mssql/information_schema.py @@ -1,5 +1,5 @@ # mssql/information_schema.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/mxodbc.py b/lib/sqlalchemy/dialects/mssql/mxodbc.py index 3f3fe4ed120..95c32d45298 100644 --- a/lib/sqlalchemy/dialects/mssql/mxodbc.py +++ b/lib/sqlalchemy/dialects/mssql/mxodbc.py @@ -1,5 +1,5 @@ # mssql/mxodbc.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This 
module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/pymssql.py b/lib/sqlalchemy/dialects/mssql/pymssql.py index b559384ba0a..84c5fed6f50 100644 --- a/lib/sqlalchemy/dialects/mssql/pymssql.py +++ b/lib/sqlalchemy/dialects/mssql/pymssql.py @@ -1,5 +1,5 @@ # mssql/pymssql.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/pyodbc.py b/lib/sqlalchemy/dialects/mssql/pyodbc.py index 0a56a03de69..4c164a73f20 100644 --- a/lib/sqlalchemy/dialects/mssql/pyodbc.py +++ b/lib/sqlalchemy/dialects/mssql/pyodbc.py @@ -1,5 +1,5 @@ # mssql/pyodbc.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/__init__.py b/lib/sqlalchemy/dialects/mysql/__init__.py index c83fec0c394..04c83d161ee 100644 --- a/lib/sqlalchemy/dialects/mysql/__init__.py +++ b/lib/sqlalchemy/dialects/mysql/__init__.py @@ -1,5 +1,5 @@ # mysql/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/aiomysql.py b/lib/sqlalchemy/dialects/mysql/aiomysql.py index 93d2360580d..975467c24f7 100644 --- a/lib/sqlalchemy/dialects/mysql/aiomysql.py +++ b/lib/sqlalchemy/dialects/mysql/aiomysql.py @@ -1,5 +1,5 @@ # mysql/aiomysql.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/asyncmy.py b/lib/sqlalchemy/dialects/mysql/asyncmy.py index 0fca338f561..9afd41bb3be 100644 --- 
a/lib/sqlalchemy/dialects/mysql/asyncmy.py +++ b/lib/sqlalchemy/dialects/mysql/asyncmy.py @@ -1,5 +1,5 @@ # mysql/asyncmy.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index 6d9c22e9628..3fa1204207b 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -1,5 +1,5 @@ # mysql/base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/cymysql.py b/lib/sqlalchemy/dialects/mysql/cymysql.py index f729e4a18c9..a67a194a991 100644 --- a/lib/sqlalchemy/dialects/mysql/cymysql.py +++ b/lib/sqlalchemy/dialects/mysql/cymysql.py @@ -1,5 +1,5 @@ # mysql/cymysql.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/enumerated.py b/lib/sqlalchemy/dialects/mysql/enumerated.py index 9f9a838c5df..9857a820e66 100644 --- a/lib/sqlalchemy/dialects/mysql/enumerated.py +++ b/lib/sqlalchemy/dialects/mysql/enumerated.py @@ -1,5 +1,5 @@ # mysql/enumerated.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/json.py b/lib/sqlalchemy/dialects/mysql/json.py index 8d052cc7c0d..857fccebd4a 100644 --- a/lib/sqlalchemy/dialects/mysql/json.py +++ b/lib/sqlalchemy/dialects/mysql/json.py @@ -1,5 +1,5 @@ # mysql/json.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors 
and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/mariadbconnector.py b/lib/sqlalchemy/dialects/mysql/mariadbconnector.py index 14ed11b1999..f3130488780 100644 --- a/lib/sqlalchemy/dialects/mysql/mariadbconnector.py +++ b/lib/sqlalchemy/dialects/mysql/mariadbconnector.py @@ -1,5 +1,5 @@ # mysql/mariadbconnector.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/mysqlconnector.py b/lib/sqlalchemy/dialects/mysql/mysqlconnector.py index e17da317456..356babe70fd 100644 --- a/lib/sqlalchemy/dialects/mysql/mysqlconnector.py +++ b/lib/sqlalchemy/dialects/mysql/mysqlconnector.py @@ -1,5 +1,5 @@ # mysql/mysqlconnector.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/mysqldb.py b/lib/sqlalchemy/dialects/mysql/mysqldb.py index dfe719c28da..7a721e8e67e 100644 --- a/lib/sqlalchemy/dialects/mysql/mysqldb.py +++ b/lib/sqlalchemy/dialects/mysql/mysqldb.py @@ -1,5 +1,5 @@ # mysql/mysqldb.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/oursql.py b/lib/sqlalchemy/dialects/mysql/oursql.py index 6ec7ce9b9ec..f6287dc7aea 100644 --- a/lib/sqlalchemy/dialects/mysql/oursql.py +++ b/lib/sqlalchemy/dialects/mysql/oursql.py @@ -1,5 +1,5 @@ # mysql/oursql.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff 
--git a/lib/sqlalchemy/dialects/mysql/pymysql.py b/lib/sqlalchemy/dialects/mysql/pymysql.py index 1d2c3be2d73..f6201333215 100644 --- a/lib/sqlalchemy/dialects/mysql/pymysql.py +++ b/lib/sqlalchemy/dialects/mysql/pymysql.py @@ -1,5 +1,5 @@ # mysql/pymysql.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/pyodbc.py b/lib/sqlalchemy/dialects/mysql/pyodbc.py index 9cae7b71452..d5a5c0c9dc4 100644 --- a/lib/sqlalchemy/dialects/mysql/pyodbc.py +++ b/lib/sqlalchemy/dialects/mysql/pyodbc.py @@ -1,5 +1,5 @@ # mysql/pyodbc.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/reflection.py b/lib/sqlalchemy/dialects/mysql/reflection.py index 503c9614c0a..27394bbe9fc 100644 --- a/lib/sqlalchemy/dialects/mysql/reflection.py +++ b/lib/sqlalchemy/dialects/mysql/reflection.py @@ -1,5 +1,5 @@ # mysql/reflection.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/reserved_words.py b/lib/sqlalchemy/dialects/mysql/reserved_words.py index e2c39852d80..995168bbb09 100644 --- a/lib/sqlalchemy/dialects/mysql/reserved_words.py +++ b/lib/sqlalchemy/dialects/mysql/reserved_words.py @@ -1,5 +1,5 @@ # mysql/reserved_words.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/types.py b/lib/sqlalchemy/dialects/mysql/types.py index 
dee58b4a531..b81ee95ac1d 100644 --- a/lib/sqlalchemy/dialects/mysql/types.py +++ b/lib/sqlalchemy/dialects/mysql/types.py @@ -1,5 +1,5 @@ # mysql/types.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/oracle/__init__.py b/lib/sqlalchemy/dialects/oracle/__init__.py index 3d4aca1364a..c83e0573d68 100644 --- a/lib/sqlalchemy/dialects/oracle/__init__.py +++ b/lib/sqlalchemy/dialects/oracle/__init__.py @@ -1,5 +1,5 @@ # oracle/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py index 8b790c70c69..49ee47959ab 100644 --- a/lib/sqlalchemy/dialects/oracle/base.py +++ b/lib/sqlalchemy/dialects/oracle/base.py @@ -1,5 +1,5 @@ # oracle/base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/lib/sqlalchemy/dialects/oracle/cx_oracle.py index 3e705dced33..104b88bc0de 100644 --- a/lib/sqlalchemy/dialects/oracle/cx_oracle.py +++ b/lib/sqlalchemy/dialects/oracle/cx_oracle.py @@ -1,4 +1,4 @@ -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/__init__.py b/lib/sqlalchemy/dialects/postgresql/__init__.py index 0de84e5797d..12d9e94443d 100644 --- a/lib/sqlalchemy/dialects/postgresql/__init__.py +++ b/lib/sqlalchemy/dialects/postgresql/__init__.py @@ -1,5 +1,5 @@ # 
postgresql/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/array.py b/lib/sqlalchemy/dialects/postgresql/array.py index 4f296e8ef13..568e5b7b065 100644 --- a/lib/sqlalchemy/dialects/postgresql/array.py +++ b/lib/sqlalchemy/dialects/postgresql/array.py @@ -1,5 +1,5 @@ # postgresql/array.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/asyncpg.py b/lib/sqlalchemy/dialects/postgresql/asyncpg.py index fedc0b495b4..f2409eeab8e 100644 --- a/lib/sqlalchemy/dialects/postgresql/asyncpg.py +++ b/lib/sqlalchemy/dialects/postgresql/asyncpg.py @@ -1,5 +1,5 @@ # postgresql/asyncpg.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index 4273ff474af..04b79a7338e 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -1,5 +1,5 @@ # postgresql/base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/dml.py b/lib/sqlalchemy/dialects/postgresql/dml.py index 4104fe51f78..b483774db3e 100644 --- a/lib/sqlalchemy/dialects/postgresql/dml.py +++ b/lib/sqlalchemy/dialects/postgresql/dml.py @@ -1,5 +1,5 @@ # postgresql/on_conflict.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # 
This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/ext.py b/lib/sqlalchemy/dialects/postgresql/ext.py index 8c3a539be04..c3bda33b92a 100644 --- a/lib/sqlalchemy/dialects/postgresql/ext.py +++ b/lib/sqlalchemy/dialects/postgresql/ext.py @@ -1,5 +1,5 @@ # postgresql/ext.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/hstore.py b/lib/sqlalchemy/dialects/postgresql/hstore.py index 7f42c3ab4e0..29800d2e39b 100644 --- a/lib/sqlalchemy/dialects/postgresql/hstore.py +++ b/lib/sqlalchemy/dialects/postgresql/hstore.py @@ -1,5 +1,5 @@ # postgresql/hstore.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/json.py b/lib/sqlalchemy/dialects/postgresql/json.py index 2acf177f539..daaaeacc121 100644 --- a/lib/sqlalchemy/dialects/postgresql/json.py +++ b/lib/sqlalchemy/dialects/postgresql/json.py @@ -1,5 +1,5 @@ # postgresql/json.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/pg8000.py b/lib/sqlalchemy/dialects/postgresql/pg8000.py index a94f9dcdbb0..98561a9b99b 100644 --- a/lib/sqlalchemy/dialects/postgresql/pg8000.py +++ b/lib/sqlalchemy/dialects/postgresql/pg8000.py @@ -1,5 +1,5 @@ # postgresql/pg8000.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py index 
4143dd041d6..19d7b06ac9f 100644 --- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py +++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py @@ -1,5 +1,5 @@ # postgresql/psycopg2.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py b/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py index 5be52a8707b..10d1aae5d28 100644 --- a/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py +++ b/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py @@ -1,5 +1,5 @@ # testing/engines.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/pygresql.py b/lib/sqlalchemy/dialects/postgresql/pygresql.py index 42ef3c31e02..d273b8c5be0 100644 --- a/lib/sqlalchemy/dialects/postgresql/pygresql.py +++ b/lib/sqlalchemy/dialects/postgresql/pygresql.py @@ -1,5 +1,5 @@ # postgresql/pygresql.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/pypostgresql.py b/lib/sqlalchemy/dialects/postgresql/pypostgresql.py index 1d646df44a4..886e368c5a2 100644 --- a/lib/sqlalchemy/dialects/postgresql/pypostgresql.py +++ b/lib/sqlalchemy/dialects/postgresql/pypostgresql.py @@ -1,5 +1,5 @@ # postgresql/pypostgresql.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/ranges.py b/lib/sqlalchemy/dialects/postgresql/ranges.py index 
e7129aebb5b..35cf360cff0 100644 --- a/lib/sqlalchemy/dialects/postgresql/ranges.py +++ b/lib/sqlalchemy/dialects/postgresql/ranges.py @@ -1,4 +1,4 @@ -# Copyright (C) 2013-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2013-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/__init__.py b/lib/sqlalchemy/dialects/sqlite/__init__.py index 6e3ad0e668b..8d8d933b912 100644 --- a/lib/sqlalchemy/dialects/sqlite/__init__.py +++ b/lib/sqlalchemy/dialects/sqlite/__init__.py @@ -1,5 +1,5 @@ # sqlite/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/aiosqlite.py b/lib/sqlalchemy/dialects/sqlite/aiosqlite.py index 4319e26611d..8e621c8e2e5 100644 --- a/lib/sqlalchemy/dialects/sqlite/aiosqlite.py +++ b/lib/sqlalchemy/dialects/sqlite/aiosqlite.py @@ -1,5 +1,5 @@ # sqlite/aiosqlite.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index e936c9080a0..bcea17620f3 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -1,5 +1,5 @@ # sqlite/base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/dml.py b/lib/sqlalchemy/dialects/sqlite/dml.py index e4d8bd9434d..b04a5e6eb62 100644 --- a/lib/sqlalchemy/dialects/sqlite/dml.py +++ b/lib/sqlalchemy/dialects/sqlite/dml.py @@ -1,4 +1,4 @@ -# Copyright (C) 
2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py b/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py index 3765191c1bc..65f94c81353 100644 --- a/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py +++ b/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py @@ -1,5 +1,5 @@ # sqlite/pysqlcipher.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/pysqlite.py b/lib/sqlalchemy/dialects/sqlite/pysqlite.py index e9d5d96827f..1aae5610dfc 100644 --- a/lib/sqlalchemy/dialects/sqlite/pysqlite.py +++ b/lib/sqlalchemy/dialects/sqlite/pysqlite.py @@ -1,5 +1,5 @@ # sqlite/pysqlite.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sybase/__init__.py b/lib/sqlalchemy/dialects/sybase/__init__.py index 87a90fb0623..c7755c8e767 100644 --- a/lib/sqlalchemy/dialects/sybase/__init__.py +++ b/lib/sqlalchemy/dialects/sybase/__init__.py @@ -1,5 +1,5 @@ # sybase/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sybase/base.py b/lib/sqlalchemy/dialects/sybase/base.py index 120093015c5..83248d10c63 100644 --- a/lib/sqlalchemy/dialects/sybase/base.py +++ b/lib/sqlalchemy/dialects/sybase/base.py @@ -1,5 +1,5 @@ # sybase/base.py -# Copyright (C) 2010-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2010-2022 the SQLAlchemy authors and contributors # # 
get_select_precolumns(), limit_clause() implementation # copyright (C) 2007 Fisch Asset Management diff --git a/lib/sqlalchemy/dialects/sybase/mxodbc.py b/lib/sqlalchemy/dialects/sybase/mxodbc.py index 4e8c8aeab19..fe5a61460fb 100644 --- a/lib/sqlalchemy/dialects/sybase/mxodbc.py +++ b/lib/sqlalchemy/dialects/sybase/mxodbc.py @@ -1,5 +1,5 @@ # sybase/mxodbc.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sybase/pyodbc.py b/lib/sqlalchemy/dialects/sybase/pyodbc.py index afc315f264e..f408e8f9c36 100644 --- a/lib/sqlalchemy/dialects/sybase/pyodbc.py +++ b/lib/sqlalchemy/dialects/sybase/pyodbc.py @@ -1,5 +1,5 @@ # sybase/pyodbc.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sybase/pysybase.py b/lib/sqlalchemy/dialects/sybase/pysybase.py index 0f408e80159..4c96aacd78b 100644 --- a/lib/sqlalchemy/dialects/sybase/pysybase.py +++ b/lib/sqlalchemy/dialects/sybase/pysybase.py @@ -1,5 +1,5 @@ # sybase/pysybase.py -# Copyright (C) 2010-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2010-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/__init__.py b/lib/sqlalchemy/engine/__init__.py index 6306e201d0c..488e41de33b 100644 --- a/lib/sqlalchemy/engine/__init__.py +++ b/lib/sqlalchemy/engine/__init__.py @@ -1,5 +1,5 @@ # engine/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py 
index a5d973a2c2a..0e695f65a55 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -1,5 +1,5 @@ # engine/base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/create.py b/lib/sqlalchemy/engine/create.py index 5e56ecdd9f8..7816a301061 100644 --- a/lib/sqlalchemy/engine/create.py +++ b/lib/sqlalchemy/engine/create.py @@ -1,5 +1,5 @@ # engine/create.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/cursor.py b/lib/sqlalchemy/engine/cursor.py index 79f87fc0e04..7e8c0d7c9f6 100644 --- a/lib/sqlalchemy/engine/cursor.py +++ b/lib/sqlalchemy/engine/cursor.py @@ -1,5 +1,5 @@ # engine/cursor.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/default.py b/lib/sqlalchemy/engine/default.py index 0dac6600ef3..fae13a19b1d 100644 --- a/lib/sqlalchemy/engine/default.py +++ b/lib/sqlalchemy/engine/default.py @@ -1,5 +1,5 @@ # engine/default.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/events.py b/lib/sqlalchemy/engine/events.py index f091c7733a8..ca70f037a63 100644 --- a/lib/sqlalchemy/engine/events.py +++ b/lib/sqlalchemy/engine/events.py @@ -1,5 +1,5 @@ # sqlalchemy/engine/events.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This 
module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/interfaces.py b/lib/sqlalchemy/engine/interfaces.py index d1484718eb6..0bfd8fb8b59 100644 --- a/lib/sqlalchemy/engine/interfaces.py +++ b/lib/sqlalchemy/engine/interfaces.py @@ -1,5 +1,5 @@ # engine/interfaces.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/mock.py b/lib/sqlalchemy/engine/mock.py index 803fe30a285..6fcb09f1b7d 100644 --- a/lib/sqlalchemy/engine/mock.py +++ b/lib/sqlalchemy/engine/mock.py @@ -1,5 +1,5 @@ # engine/mock.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/reflection.py b/lib/sqlalchemy/engine/reflection.py index 113aa8ea069..ad50a3e3160 100644 --- a/lib/sqlalchemy/engine/reflection.py +++ b/lib/sqlalchemy/engine/reflection.py @@ -1,5 +1,5 @@ # engine/reflection.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py index 7cf0bd3f966..4264d6d8581 100644 --- a/lib/sqlalchemy/engine/result.py +++ b/lib/sqlalchemy/engine/result.py @@ -1,5 +1,5 @@ # engine/result.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/row.py b/lib/sqlalchemy/engine/row.py index 02fc560ca4d..c76632db1c0 100644 --- a/lib/sqlalchemy/engine/row.py +++ b/lib/sqlalchemy/engine/row.py @@ -1,5 +1,5 @@ # engine/row.py -# 
Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/strategies.py b/lib/sqlalchemy/engine/strategies.py index bda1c7fae9e..54a5e51c160 100644 --- a/lib/sqlalchemy/engine/strategies.py +++ b/lib/sqlalchemy/engine/strategies.py @@ -1,5 +1,5 @@ # engine/strategies.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/url.py b/lib/sqlalchemy/engine/url.py index a73b81a319b..5c46676f77c 100644 --- a/lib/sqlalchemy/engine/url.py +++ b/lib/sqlalchemy/engine/url.py @@ -1,5 +1,5 @@ # engine/url.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/util.py b/lib/sqlalchemy/engine/util.py index 660ffafa0a2..1b03ebbf0ad 100644 --- a/lib/sqlalchemy/engine/util.py +++ b/lib/sqlalchemy/engine/util.py @@ -1,5 +1,5 @@ # engine/util.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/__init__.py b/lib/sqlalchemy/event/__init__.py index 15aae8d6d76..a89bea894e2 100644 --- a/lib/sqlalchemy/event/__init__.py +++ b/lib/sqlalchemy/event/__init__.py @@ -1,5 +1,5 @@ # event/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/api.py b/lib/sqlalchemy/event/api.py index 5487c9f1afe..ce44f571ba5 100644 
--- a/lib/sqlalchemy/event/api.py +++ b/lib/sqlalchemy/event/api.py @@ -1,5 +1,5 @@ # event/api.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/attr.py b/lib/sqlalchemy/event/attr.py index a0c2992213c..0d16165c4ee 100644 --- a/lib/sqlalchemy/event/attr.py +++ b/lib/sqlalchemy/event/attr.py @@ -1,5 +1,5 @@ # event/attr.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/base.py b/lib/sqlalchemy/event/base.py index f8cbfbd7f62..510e16bddfe 100644 --- a/lib/sqlalchemy/event/base.py +++ b/lib/sqlalchemy/event/base.py @@ -1,5 +1,5 @@ # event/base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/legacy.py b/lib/sqlalchemy/event/legacy.py index 0dbf695048f..d9f6ce57354 100644 --- a/lib/sqlalchemy/event/legacy.py +++ b/lib/sqlalchemy/event/legacy.py @@ -1,5 +1,5 @@ # event/legacy.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/registry.py b/lib/sqlalchemy/event/registry.py index ca85f33684e..ac143c44d33 100644 --- a/lib/sqlalchemy/event/registry.py +++ b/lib/sqlalchemy/event/registry.py @@ -1,5 +1,5 @@ # event/registry.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/events.py 
b/lib/sqlalchemy/events.py index 8c0c5ff8d59..d17b0b12f59 100644 --- a/lib/sqlalchemy/events.py +++ b/lib/sqlalchemy/events.py @@ -1,5 +1,5 @@ # sqlalchemy/events.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/exc.py b/lib/sqlalchemy/exc.py index 7fa77120c65..78bcef3a02f 100644 --- a/lib/sqlalchemy/exc.py +++ b/lib/sqlalchemy/exc.py @@ -1,5 +1,5 @@ # sqlalchemy/exc.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/__init__.py b/lib/sqlalchemy/ext/__init__.py index a4a9b34ab0c..62bbbf3ceb2 100644 --- a/lib/sqlalchemy/ext/__init__.py +++ b/lib/sqlalchemy/ext/__init__.py @@ -1,5 +1,5 @@ # ext/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/associationproxy.py b/lib/sqlalchemy/ext/associationproxy.py index dd5c10ac956..9a73bb5c2cd 100644 --- a/lib/sqlalchemy/ext/associationproxy.py +++ b/lib/sqlalchemy/ext/associationproxy.py @@ -1,5 +1,5 @@ # ext/associationproxy.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/__init__.py b/lib/sqlalchemy/ext/asyncio/__init__.py index 03103971375..15b2cb015b7 100644 --- a/lib/sqlalchemy/ext/asyncio/__init__.py +++ b/lib/sqlalchemy/ext/asyncio/__init__.py @@ -1,5 +1,5 @@ # ext/asyncio/__init__.py -# Copyright (C) 2020-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2022 the SQLAlchemy 
authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/engine.py b/lib/sqlalchemy/ext/asyncio/engine.py index 0b212830e60..0939395c18b 100644 --- a/lib/sqlalchemy/ext/asyncio/engine.py +++ b/lib/sqlalchemy/ext/asyncio/engine.py @@ -1,5 +1,5 @@ # ext/asyncio/engine.py -# Copyright (C) 2020-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/events.py b/lib/sqlalchemy/ext/asyncio/events.py index e3d8456908c..a059b93e6b9 100644 --- a/lib/sqlalchemy/ext/asyncio/events.py +++ b/lib/sqlalchemy/ext/asyncio/events.py @@ -1,5 +1,5 @@ # ext/asyncio/events.py -# Copyright (C) 2020-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/exc.py b/lib/sqlalchemy/ext/asyncio/exc.py index fc53f5c4b8b..cf0d9a85daf 100644 --- a/lib/sqlalchemy/ext/asyncio/exc.py +++ b/lib/sqlalchemy/ext/asyncio/exc.py @@ -1,5 +1,5 @@ # ext/asyncio/exc.py -# Copyright (C) 2020-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/result.py b/lib/sqlalchemy/ext/asyncio/result.py index dff87a569dd..81ef9915c52 100644 --- a/lib/sqlalchemy/ext/asyncio/result.py +++ b/lib/sqlalchemy/ext/asyncio/result.py @@ -1,5 +1,5 @@ # ext/asyncio/result.py -# Copyright (C) 2020-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/scoping.py b/lib/sqlalchemy/ext/asyncio/scoping.py index 4e7f15c1fda..535c30d6154 100644 
--- a/lib/sqlalchemy/ext/asyncio/scoping.py +++ b/lib/sqlalchemy/ext/asyncio/scoping.py @@ -1,5 +1,5 @@ # ext/asyncio/scoping.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/session.py b/lib/sqlalchemy/ext/asyncio/session.py index 96131926917..67db7110610 100644 --- a/lib/sqlalchemy/ext/asyncio/session.py +++ b/lib/sqlalchemy/ext/asyncio/session.py @@ -1,5 +1,5 @@ # ext/asyncio/session.py -# Copyright (C) 2020-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/automap.py b/lib/sqlalchemy/ext/automap.py index 7cb2c4400b4..a586ae1c4ca 100644 --- a/lib/sqlalchemy/ext/automap.py +++ b/lib/sqlalchemy/ext/automap.py @@ -1,5 +1,5 @@ # ext/automap.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/baked.py b/lib/sqlalchemy/ext/baked.py index 61328fce95c..109e0c0c3e5 100644 --- a/lib/sqlalchemy/ext/baked.py +++ b/lib/sqlalchemy/ext/baked.py @@ -1,5 +1,5 @@ # sqlalchemy/ext/baked.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/compiler.py b/lib/sqlalchemy/ext/compiler.py index 3470407158a..b97b23e660e 100644 --- a/lib/sqlalchemy/ext/compiler.py +++ b/lib/sqlalchemy/ext/compiler.py @@ -1,5 +1,5 @@ # ext/compiler.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of 
SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/declarative/__init__.py b/lib/sqlalchemy/ext/declarative/__init__.py index b1c1d369123..6215e35d83d 100644 --- a/lib/sqlalchemy/ext/declarative/__init__.py +++ b/lib/sqlalchemy/ext/declarative/__init__.py @@ -1,5 +1,5 @@ # ext/declarative/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/declarative/extensions.py b/lib/sqlalchemy/ext/declarative/extensions.py index 1a12b1205fc..6286091b1d5 100644 --- a/lib/sqlalchemy/ext/declarative/extensions.py +++ b/lib/sqlalchemy/ext/declarative/extensions.py @@ -1,5 +1,5 @@ # ext/declarative/extensions.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/horizontal_shard.py b/lib/sqlalchemy/ext/horizontal_shard.py index 5f13ad26890..bad076e35b0 100644 --- a/lib/sqlalchemy/ext/horizontal_shard.py +++ b/lib/sqlalchemy/ext/horizontal_shard.py @@ -1,5 +1,5 @@ # ext/horizontal_shard.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/hybrid.py b/lib/sqlalchemy/ext/hybrid.py index eab3f2b7385..8a4a7234681 100644 --- a/lib/sqlalchemy/ext/hybrid.py +++ b/lib/sqlalchemy/ext/hybrid.py @@ -1,5 +1,5 @@ # ext/hybrid.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/indexable.py b/lib/sqlalchemy/ext/indexable.py index 313ad11af6e..7cbac542b15 100644 --- 
a/lib/sqlalchemy/ext/indexable.py +++ b/lib/sqlalchemy/ext/indexable.py @@ -1,5 +1,5 @@ # ext/index.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/mutable.py b/lib/sqlalchemy/ext/mutable.py index 4eed3b2afe2..b5217a42677 100644 --- a/lib/sqlalchemy/ext/mutable.py +++ b/lib/sqlalchemy/ext/mutable.py @@ -1,5 +1,5 @@ # ext/mutable.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/orderinglist.py b/lib/sqlalchemy/ext/orderinglist.py index a5c418e722e..5a327d1a522 100644 --- a/lib/sqlalchemy/ext/orderinglist.py +++ b/lib/sqlalchemy/ext/orderinglist.py @@ -1,5 +1,5 @@ # ext/orderinglist.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/serializer.py b/lib/sqlalchemy/ext/serializer.py index 18a54e0798a..094b71b0039 100644 --- a/lib/sqlalchemy/ext/serializer.py +++ b/lib/sqlalchemy/ext/serializer.py @@ -1,5 +1,5 @@ # ext/serializer.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/future/__init__.py b/lib/sqlalchemy/future/__init__.py index 9bf4d042df5..a2bed07f1ba 100644 --- a/lib/sqlalchemy/future/__init__.py +++ b/lib/sqlalchemy/future/__init__.py @@ -1,5 +1,5 @@ # sql/future/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy 
and is released under diff --git a/lib/sqlalchemy/future/orm/__init__.py b/lib/sqlalchemy/future/orm/__init__.py index 89b490d7136..629631b3e0a 100644 --- a/lib/sqlalchemy/future/orm/__init__.py +++ b/lib/sqlalchemy/future/orm/__init__.py @@ -1,5 +1,5 @@ # sql/future/orm/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/inspection.py b/lib/sqlalchemy/inspection.py index 40b746655cd..7f9822d02e9 100644 --- a/lib/sqlalchemy/inspection.py +++ b/lib/sqlalchemy/inspection.py @@ -1,5 +1,5 @@ # sqlalchemy/inspect.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/log.py b/lib/sqlalchemy/log.py index 9ec3842a6df..55511c27b82 100644 --- a/lib/sqlalchemy/log.py +++ b/lib/sqlalchemy/log.py @@ -1,5 +1,5 @@ # sqlalchemy/log.py -# Copyright (C) 2006-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2006-2022 the SQLAlchemy authors and contributors # # Includes alterations by Vinay Sajip vinay_sajip@yahoo.co.uk # diff --git a/lib/sqlalchemy/orm/__init__.py b/lib/sqlalchemy/orm/__init__.py index bdc5cbf674d..78650507ee6 100644 --- a/lib/sqlalchemy/orm/__init__.py +++ b/lib/sqlalchemy/orm/__init__.py @@ -1,5 +1,5 @@ # orm/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/attributes.py b/lib/sqlalchemy/orm/attributes.py index 19e0d545e6d..2a1d3a2c3a2 100644 --- a/lib/sqlalchemy/orm/attributes.py +++ b/lib/sqlalchemy/orm/attributes.py @@ -1,5 +1,5 @@ # orm/attributes.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and 
contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/base.py b/lib/sqlalchemy/orm/base.py index 6553cf66981..8e94d7b3845 100644 --- a/lib/sqlalchemy/orm/base.py +++ b/lib/sqlalchemy/orm/base.py @@ -1,5 +1,5 @@ # orm/base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/clsregistry.py b/lib/sqlalchemy/orm/clsregistry.py index 4ec31bcf74e..104d7c306f0 100644 --- a/lib/sqlalchemy/orm/clsregistry.py +++ b/lib/sqlalchemy/orm/clsregistry.py @@ -1,5 +1,5 @@ # ext/declarative/clsregistry.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/collections.py b/lib/sqlalchemy/orm/collections.py index 351069f9a5d..a189f02dabe 100644 --- a/lib/sqlalchemy/orm/collections.py +++ b/lib/sqlalchemy/orm/collections.py @@ -1,5 +1,5 @@ # orm/collections.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/context.py b/lib/sqlalchemy/orm/context.py index edbe45ac210..a81bf557399 100644 --- a/lib/sqlalchemy/orm/context.py +++ b/lib/sqlalchemy/orm/context.py @@ -1,5 +1,5 @@ # orm/context.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/decl_api.py b/lib/sqlalchemy/orm/decl_api.py index e90b91a5a49..452f2eaf533 100644 --- a/lib/sqlalchemy/orm/decl_api.py +++ 
b/lib/sqlalchemy/orm/decl_api.py @@ -1,5 +1,5 @@ # ext/declarative/api.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/decl_base.py b/lib/sqlalchemy/orm/decl_base.py index bf1bc537da4..6f02e569774 100644 --- a/lib/sqlalchemy/orm/decl_base.py +++ b/lib/sqlalchemy/orm/decl_base.py @@ -1,5 +1,5 @@ # ext/declarative/base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/dependency.py b/lib/sqlalchemy/orm/dependency.py index 27919050ece..1b5be9a7ec7 100644 --- a/lib/sqlalchemy/orm/dependency.py +++ b/lib/sqlalchemy/orm/dependency.py @@ -1,5 +1,5 @@ # orm/dependency.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/descriptor_props.py b/lib/sqlalchemy/orm/descriptor_props.py index 535067d88d0..3d7f23be1ca 100644 --- a/lib/sqlalchemy/orm/descriptor_props.py +++ b/lib/sqlalchemy/orm/descriptor_props.py @@ -1,5 +1,5 @@ # orm/descriptor_props.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/dynamic.py b/lib/sqlalchemy/orm/dynamic.py index 405498aaf69..5d74bbffd53 100644 --- a/lib/sqlalchemy/orm/dynamic.py +++ b/lib/sqlalchemy/orm/dynamic.py @@ -1,5 +1,5 @@ # orm/dynamic.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released 
under diff --git a/lib/sqlalchemy/orm/evaluator.py b/lib/sqlalchemy/orm/evaluator.py index 69d80dd8bdb..dbbfba09f01 100644 --- a/lib/sqlalchemy/orm/evaluator.py +++ b/lib/sqlalchemy/orm/evaluator.py @@ -1,5 +1,5 @@ # orm/evaluator.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/events.py b/lib/sqlalchemy/orm/events.py index 2c8d155ad88..1514a2d433d 100644 --- a/lib/sqlalchemy/orm/events.py +++ b/lib/sqlalchemy/orm/events.py @@ -1,5 +1,5 @@ # orm/events.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/exc.py b/lib/sqlalchemy/orm/exc.py index dbb499d5dc0..8dd4d90d686 100644 --- a/lib/sqlalchemy/orm/exc.py +++ b/lib/sqlalchemy/orm/exc.py @@ -1,5 +1,5 @@ # orm/exc.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/identity.py b/lib/sqlalchemy/orm/identity.py index 6aea0d18547..7de8e2cdec6 100644 --- a/lib/sqlalchemy/orm/identity.py +++ b/lib/sqlalchemy/orm/identity.py @@ -1,5 +1,5 @@ # orm/identity.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/instrumentation.py b/lib/sqlalchemy/orm/instrumentation.py index 02fc7379322..97692b6421c 100644 --- a/lib/sqlalchemy/orm/instrumentation.py +++ b/lib/sqlalchemy/orm/instrumentation.py @@ -1,5 +1,5 @@ # orm/instrumentation.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 
2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/interfaces.py b/lib/sqlalchemy/orm/interfaces.py index 6182588dce6..63295d0b9e4 100644 --- a/lib/sqlalchemy/orm/interfaces.py +++ b/lib/sqlalchemy/orm/interfaces.py @@ -1,5 +1,5 @@ # orm/interfaces.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/loading.py b/lib/sqlalchemy/orm/loading.py index bbad98144d2..7dee717c68d 100644 --- a/lib/sqlalchemy/orm/loading.py +++ b/lib/sqlalchemy/orm/loading.py @@ -1,5 +1,5 @@ # orm/loading.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index af63c523998..984e4de979a 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -1,5 +1,5 @@ # orm/mapper.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/path_registry.py b/lib/sqlalchemy/orm/path_registry.py index 6bebbd006e1..331ddd7dc54 100644 --- a/lib/sqlalchemy/orm/path_registry.py +++ b/lib/sqlalchemy/orm/path_registry.py @@ -1,5 +1,5 @@ # orm/path_registry.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index 4ba1917f63f..dc7b4012e75 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ 
b/lib/sqlalchemy/orm/persistence.py @@ -1,5 +1,5 @@ # orm/persistence.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/properties.py b/lib/sqlalchemy/orm/properties.py index fa230d10930..b5ac9b87945 100644 --- a/lib/sqlalchemy/orm/properties.py +++ b/lib/sqlalchemy/orm/properties.py @@ -1,5 +1,5 @@ # orm/properties.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index b412caa12f6..ab230f66f1d 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -1,5 +1,5 @@ # orm/query.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/relationships.py b/lib/sqlalchemy/orm/relationships.py index d021ac9a298..7949e9e4d35 100644 --- a/lib/sqlalchemy/orm/relationships.py +++ b/lib/sqlalchemy/orm/relationships.py @@ -1,5 +1,5 @@ # orm/relationships.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/scoping.py b/lib/sqlalchemy/orm/scoping.py index 7b228945317..f3232334c74 100644 --- a/lib/sqlalchemy/orm/scoping.py +++ b/lib/sqlalchemy/orm/scoping.py @@ -1,5 +1,5 @@ # orm/scoping.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git 
a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index 5f1560a6974..49e8060d082 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -1,5 +1,5 @@ # orm/session.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/state.py b/lib/sqlalchemy/orm/state.py index 994cbe53e7b..3bb59277983 100644 --- a/lib/sqlalchemy/orm/state.py +++ b/lib/sqlalchemy/orm/state.py @@ -1,5 +1,5 @@ # orm/state.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index 679b35a21ed..b6d74585d0b 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -1,5 +1,5 @@ # orm/strategies.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/strategy_options.py b/lib/sqlalchemy/orm/strategy_options.py index 0ea0df57c03..c3dd5df3b55 100644 --- a/lib/sqlalchemy/orm/strategy_options.py +++ b/lib/sqlalchemy/orm/strategy_options.py @@ -1,4 +1,4 @@ -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/sync.py b/lib/sqlalchemy/orm/sync.py index 9d684a2a872..c0418045d54 100644 --- a/lib/sqlalchemy/orm/sync.py +++ b/lib/sqlalchemy/orm/sync.py @@ -1,5 +1,5 @@ # orm/sync.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and 
contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/unitofwork.py b/lib/sqlalchemy/orm/unitofwork.py index f29d11bcd59..22576372d4d 100644 --- a/lib/sqlalchemy/orm/unitofwork.py +++ b/lib/sqlalchemy/orm/unitofwork.py @@ -1,5 +1,5 @@ # orm/unitofwork.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index ae190ec1c14..c574a39a4bd 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -1,5 +1,5 @@ # orm/util.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/pool/__init__.py b/lib/sqlalchemy/pool/__init__.py index 5b4f4ebb101..6a00ef85088 100644 --- a/lib/sqlalchemy/pool/__init__.py +++ b/lib/sqlalchemy/pool/__init__.py @@ -1,5 +1,5 @@ # sqlalchemy/pool/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/pool/base.py b/lib/sqlalchemy/pool/base.py index 38b0f67cb88..cde28c2fb02 100644 --- a/lib/sqlalchemy/pool/base.py +++ b/lib/sqlalchemy/pool/base.py @@ -1,5 +1,5 @@ # sqlalchemy/pool.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/pool/dbapi_proxy.py b/lib/sqlalchemy/pool/dbapi_proxy.py index 7dfb59e36e9..b0c40f2ab7d 100644 --- a/lib/sqlalchemy/pool/dbapi_proxy.py +++ b/lib/sqlalchemy/pool/dbapi_proxy.py @@ -1,5 +1,5 @@ # 
sqlalchemy/pool/dbapi_proxy.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/pool/events.py b/lib/sqlalchemy/pool/events.py index 7c2cae7c5eb..8dd99bb84aa 100644 --- a/lib/sqlalchemy/pool/events.py +++ b/lib/sqlalchemy/pool/events.py @@ -1,5 +1,5 @@ # sqlalchemy/pool/events.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/pool/impl.py b/lib/sqlalchemy/pool/impl.py index 3ef33d02d2b..91d02909556 100644 --- a/lib/sqlalchemy/pool/impl.py +++ b/lib/sqlalchemy/pool/impl.py @@ -1,5 +1,5 @@ # sqlalchemy/pool.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/processors.py b/lib/sqlalchemy/processors.py index 0c0aa1bd6c4..e7f388fc6da 100644 --- a/lib/sqlalchemy/processors.py +++ b/lib/sqlalchemy/processors.py @@ -1,5 +1,5 @@ # sqlalchemy/processors.py -# Copyright (C) 2010-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2010-2022 the SQLAlchemy authors and contributors # # Copyright (C) 2010 Gaetan de Menten gdementen@gmail.com # diff --git a/lib/sqlalchemy/schema.py b/lib/sqlalchemy/schema.py index eeb7f751abd..61f82bba019 100644 --- a/lib/sqlalchemy/schema.py +++ b/lib/sqlalchemy/schema.py @@ -1,5 +1,5 @@ # schema.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/__init__.py b/lib/sqlalchemy/sql/__init__.py index f374d555d5f..26774416185 100644 --- 
a/lib/sqlalchemy/sql/__init__.py +++ b/lib/sqlalchemy/sql/__init__.py @@ -1,5 +1,5 @@ # sql/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/annotation.py b/lib/sqlalchemy/sql/annotation.py index 3c02ccb26fa..5c000ed6c3f 100644 --- a/lib/sqlalchemy/sql/annotation.py +++ b/lib/sqlalchemy/sql/annotation.py @@ -1,5 +1,5 @@ # sql/annotation.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/base.py b/lib/sqlalchemy/sql/base.py index e04d3b75f0c..52339e35a73 100644 --- a/lib/sqlalchemy/sql/base.py +++ b/lib/sqlalchemy/sql/base.py @@ -1,5 +1,5 @@ # sql/base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/coercions.py b/lib/sqlalchemy/sql/coercions.py index e378d9345f2..b3974c3d360 100644 --- a/lib/sqlalchemy/sql/coercions.py +++ b/lib/sqlalchemy/sql/coercions.py @@ -1,5 +1,5 @@ # sql/coercions.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 867bb4dcd7e..78db3cb2be6 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -1,5 +1,5 @@ # sql/compiler.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git 
a/lib/sqlalchemy/sql/crud.py b/lib/sqlalchemy/sql/crud.py index a9c9cb4c133..804777c29ee 100644 --- a/lib/sqlalchemy/sql/crud.py +++ b/lib/sqlalchemy/sql/crud.py @@ -1,5 +1,5 @@ # sql/crud.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/ddl.py b/lib/sqlalchemy/sql/ddl.py index b79fee17931..bf44bfdb145 100644 --- a/lib/sqlalchemy/sql/ddl.py +++ b/lib/sqlalchemy/sql/ddl.py @@ -1,5 +1,5 @@ # sql/ddl.py -# Copyright (C) 2009-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2009-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/default_comparator.py b/lib/sqlalchemy/sql/default_comparator.py index 036a96e9fd2..7d2f1dd2a4a 100644 --- a/lib/sqlalchemy/sql/default_comparator.py +++ b/lib/sqlalchemy/sql/default_comparator.py @@ -1,5 +1,5 @@ # sql/default_comparator.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/dml.py b/lib/sqlalchemy/sql/dml.py index ebff0df88d1..be5714451ba 100644 --- a/lib/sqlalchemy/sql/dml.py +++ b/lib/sqlalchemy/sql/dml.py @@ -1,5 +1,5 @@ # sql/dml.py -# Copyright (C) 2009-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2009-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index 48b64545319..0aab04139f1 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -1,5 +1,5 @@ # sql/elements.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and 
contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/events.py b/lib/sqlalchemy/sql/events.py index db80b51e352..c42578986de 100644 --- a/lib/sqlalchemy/sql/events.py +++ b/lib/sqlalchemy/sql/events.py @@ -1,5 +1,5 @@ # sqlalchemy/sql/events.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/expression.py b/lib/sqlalchemy/sql/expression.py index 129e628ab8b..b4aa14e1f9d 100644 --- a/lib/sqlalchemy/sql/expression.py +++ b/lib/sqlalchemy/sql/expression.py @@ -1,5 +1,5 @@ # sql/expression.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/functions.py b/lib/sqlalchemy/sql/functions.py index 4f3cf65b471..8c07bc06699 100644 --- a/lib/sqlalchemy/sql/functions.py +++ b/lib/sqlalchemy/sql/functions.py @@ -1,5 +1,5 @@ # sql/functions.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/lambdas.py b/lib/sqlalchemy/sql/lambdas.py index 03cd05f0202..e22f8716710 100644 --- a/lib/sqlalchemy/sql/lambdas.py +++ b/lib/sqlalchemy/sql/lambdas.py @@ -1,5 +1,5 @@ # sql/lambdas.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/naming.py b/lib/sqlalchemy/sql/naming.py index d01eabb5883..b7ad221d2c8 100644 --- a/lib/sqlalchemy/sql/naming.py +++ b/lib/sqlalchemy/sql/naming.py @@ -1,5 +1,5 @@ # sqlalchemy/naming.py -# Copyright (C) 
2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/operators.py b/lib/sqlalchemy/sql/operators.py index 695e086b88f..31a2a01a734 100644 --- a/lib/sqlalchemy/sql/operators.py +++ b/lib/sqlalchemy/sql/operators.py @@ -1,5 +1,5 @@ # sql/operators.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/roles.py b/lib/sqlalchemy/sql/roles.py index 6f25ec97570..9e146f7ff1a 100644 --- a/lib/sqlalchemy/sql/roles.py +++ b/lib/sqlalchemy/sql/roles.py @@ -1,5 +1,5 @@ # sql/roles.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py index 83d98a05818..943ed145251 100644 --- a/lib/sqlalchemy/sql/schema.py +++ b/lib/sqlalchemy/sql/schema.py @@ -1,5 +1,5 @@ # sql/schema.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index 7d18113219e..51e0ae1578e 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -1,5 +1,5 @@ # sql/selectable.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index 3f3801ab009..803f59f5b68 100644 --- a/lib/sqlalchemy/sql/sqltypes.py 
+++ b/lib/sqlalchemy/sql/sqltypes.py @@ -1,5 +1,5 @@ # sql/sqltypes.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/type_api.py b/lib/sqlalchemy/sql/type_api.py index 49f6cfe204a..cb94969c190 100644 --- a/lib/sqlalchemy/sql/type_api.py +++ b/lib/sqlalchemy/sql/type_api.py @@ -1,5 +1,5 @@ # sql/types_api.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/util.py b/lib/sqlalchemy/sql/util.py index 7fcb45709f5..7f3ef744c9c 100644 --- a/lib/sqlalchemy/sql/util.py +++ b/lib/sqlalchemy/sql/util.py @@ -1,5 +1,5 @@ # sql/util.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/visitors.py b/lib/sqlalchemy/sql/visitors.py index 3636be4be61..f72d83a4c74 100644 --- a/lib/sqlalchemy/sql/visitors.py +++ b/lib/sqlalchemy/sql/visitors.py @@ -1,5 +1,5 @@ # sql/visitors.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/__init__.py b/lib/sqlalchemy/testing/__init__.py index d78e241819e..30babae83de 100644 --- a/lib/sqlalchemy/testing/__init__.py +++ b/lib/sqlalchemy/testing/__init__.py @@ -1,5 +1,5 @@ # testing/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/assertions.py 
b/lib/sqlalchemy/testing/assertions.py index ea453813a55..02e68802282 100644 --- a/lib/sqlalchemy/testing/assertions.py +++ b/lib/sqlalchemy/testing/assertions.py @@ -1,5 +1,5 @@ # testing/assertions.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/assertsql.py b/lib/sqlalchemy/testing/assertsql.py index 4ee4c5844b0..565b3ed7675 100644 --- a/lib/sqlalchemy/testing/assertsql.py +++ b/lib/sqlalchemy/testing/assertsql.py @@ -1,5 +1,5 @@ # testing/assertsql.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/asyncio.py b/lib/sqlalchemy/testing/asyncio.py index b964ac57ceb..21890604a37 100644 --- a/lib/sqlalchemy/testing/asyncio.py +++ b/lib/sqlalchemy/testing/asyncio.py @@ -1,5 +1,5 @@ # testing/asyncio.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/config.py b/lib/sqlalchemy/testing/config.py index 097eb94e413..fc13a165579 100644 --- a/lib/sqlalchemy/testing/config.py +++ b/lib/sqlalchemy/testing/config.py @@ -1,5 +1,5 @@ # testing/config.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/engines.py b/lib/sqlalchemy/testing/engines.py index a54f70c5e08..2fb81a5bfa7 100644 --- a/lib/sqlalchemy/testing/engines.py +++ b/lib/sqlalchemy/testing/engines.py @@ -1,5 +1,5 @@ # testing/engines.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and 
contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/entities.py b/lib/sqlalchemy/testing/entities.py index 9daa5c61f80..8ea65d66933 100644 --- a/lib/sqlalchemy/testing/entities.py +++ b/lib/sqlalchemy/testing/entities.py @@ -1,5 +1,5 @@ # testing/entities.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/exclusions.py b/lib/sqlalchemy/testing/exclusions.py index d5522289b4f..521a4aa7be7 100644 --- a/lib/sqlalchemy/testing/exclusions.py +++ b/lib/sqlalchemy/testing/exclusions.py @@ -1,5 +1,5 @@ # testing/exclusions.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/fixtures.py b/lib/sqlalchemy/testing/fixtures.py index f04056c5e5e..ff5c3dd101f 100644 --- a/lib/sqlalchemy/testing/fixtures.py +++ b/lib/sqlalchemy/testing/fixtures.py @@ -1,5 +1,5 @@ # testing/fixtures.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/mock.py b/lib/sqlalchemy/testing/mock.py index 8fe08a6789f..e333c7007ef 100644 --- a/lib/sqlalchemy/testing/mock.py +++ b/lib/sqlalchemy/testing/mock.py @@ -1,5 +1,5 @@ # testing/mock.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/pickleable.py b/lib/sqlalchemy/testing/pickleable.py index 
430cb5fb687..04405e53974 100644 --- a/lib/sqlalchemy/testing/pickleable.py +++ b/lib/sqlalchemy/testing/pickleable.py @@ -1,5 +1,5 @@ # testing/pickleable.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/plugin/plugin_base.py b/lib/sqlalchemy/testing/plugin/plugin_base.py index 36390c590a7..d59564e8e01 100644 --- a/lib/sqlalchemy/testing/plugin/plugin_base.py +++ b/lib/sqlalchemy/testing/plugin/plugin_base.py @@ -1,5 +1,5 @@ # plugin/plugin_base.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/profiling.py b/lib/sqlalchemy/testing/profiling.py index de4847f2f3d..41326303afb 100644 --- a/lib/sqlalchemy/testing/profiling.py +++ b/lib/sqlalchemy/testing/profiling.py @@ -1,5 +1,5 @@ # testing/profiling.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/requirements.py b/lib/sqlalchemy/testing/requirements.py index a0f262a760a..b611c17a898 100644 --- a/lib/sqlalchemy/testing/requirements.py +++ b/lib/sqlalchemy/testing/requirements.py @@ -1,5 +1,5 @@ # testing/requirements.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/schema.py b/lib/sqlalchemy/testing/schema.py index 9c6bf9e4c29..bff07a5c900 100644 --- a/lib/sqlalchemy/testing/schema.py +++ b/lib/sqlalchemy/testing/schema.py @@ -1,5 +1,5 @@ # testing/schema.py -# Copyright (C) 2005-2021 
the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/util.py b/lib/sqlalchemy/testing/util.py index a4d55a8f2cb..be89bc6e448 100644 --- a/lib/sqlalchemy/testing/util.py +++ b/lib/sqlalchemy/testing/util.py @@ -1,5 +1,5 @@ # testing/util.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/warnings.py b/lib/sqlalchemy/testing/warnings.py index b5842ad6942..db780f40030 100644 --- a/lib/sqlalchemy/testing/warnings.py +++ b/lib/sqlalchemy/testing/warnings.py @@ -1,5 +1,5 @@ # testing/warnings.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/types.py b/lib/sqlalchemy/types.py index 9e695f6782b..07263c5b9ee 100644 --- a/lib/sqlalchemy/types.py +++ b/lib/sqlalchemy/types.py @@ -1,5 +1,5 @@ # types.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/__init__.py b/lib/sqlalchemy/util/__init__.py index e4e79294f20..497edb3b172 100644 --- a/lib/sqlalchemy/util/__init__.py +++ b/lib/sqlalchemy/util/__init__.py @@ -1,5 +1,5 @@ # util/__init__.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/_collections.py b/lib/sqlalchemy/util/_collections.py index 535ae47802f..8e218303be7 100644 --- 
a/lib/sqlalchemy/util/_collections.py +++ b/lib/sqlalchemy/util/_collections.py @@ -1,5 +1,5 @@ # util/_collections.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/_compat_py3k.py b/lib/sqlalchemy/util/_compat_py3k.py index cd9f3ebc34f..ce659a41d14 100644 --- a/lib/sqlalchemy/util/_compat_py3k.py +++ b/lib/sqlalchemy/util/_compat_py3k.py @@ -1,5 +1,5 @@ # util/_compat_py3k.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/_concurrency_py3k.py b/lib/sqlalchemy/util/_concurrency_py3k.py index 55fe87c6a78..e3c5dac5800 100644 --- a/lib/sqlalchemy/util/_concurrency_py3k.py +++ b/lib/sqlalchemy/util/_concurrency_py3k.py @@ -1,5 +1,5 @@ # util/_concurrency_py3k.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/_preloaded.py b/lib/sqlalchemy/util/_preloaded.py index c8da9230a87..1803de40227 100644 --- a/lib/sqlalchemy/util/_preloaded.py +++ b/lib/sqlalchemy/util/_preloaded.py @@ -1,5 +1,5 @@ # util/_preloaded.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/compat.py b/lib/sqlalchemy/util/compat.py index 5914e8681aa..c60d8842147 100644 --- a/lib/sqlalchemy/util/compat.py +++ b/lib/sqlalchemy/util/compat.py @@ -1,5 +1,5 @@ # util/compat.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors 
and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/concurrency.py b/lib/sqlalchemy/util/concurrency.py index ebd845cebb2..9eb44f4657e 100644 --- a/lib/sqlalchemy/util/concurrency.py +++ b/lib/sqlalchemy/util/concurrency.py @@ -1,5 +1,5 @@ # util/concurrency.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/deprecations.py b/lib/sqlalchemy/util/deprecations.py index 4d3e04fde85..fe2f968040a 100644 --- a/lib/sqlalchemy/util/deprecations.py +++ b/lib/sqlalchemy/util/deprecations.py @@ -1,5 +1,5 @@ # util/deprecations.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/langhelpers.py b/lib/sqlalchemy/util/langhelpers.py index 89ca4c1ebf6..68074dc3354 100644 --- a/lib/sqlalchemy/util/langhelpers.py +++ b/lib/sqlalchemy/util/langhelpers.py @@ -1,5 +1,5 @@ # util/langhelpers.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/queue.py b/lib/sqlalchemy/util/queue.py index 12b37220237..67c5219c724 100644 --- a/lib/sqlalchemy/util/queue.py +++ b/lib/sqlalchemy/util/queue.py @@ -1,5 +1,5 @@ # util/queue.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/topological.py b/lib/sqlalchemy/util/topological.py index ae4b37426bb..bbc819fc317 100644 --- a/lib/sqlalchemy/util/topological.py +++ 
b/lib/sqlalchemy/util/topological.py @@ -1,5 +1,5 @@ # util/topological.py -# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under From f572b00d25b0c76f088dc27c45c31b97000eb3b6 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 10 Jan 2022 14:59:59 -0500 Subject: [PATCH 082/632] ensure with_options not switched to a list Fixed regression which appeared in 1.4.23 which could cause loader options to be mis-handled in some cases, in particular when using joined table inheritance in combination with the ``polymorphic_load="selectin"`` option as well as relationship lazy loading, leading to a ``TypeError``. Fixes: #7557 Change-Id: Id38619692f94308fd5f567a02337efef7a3a7544 (cherry picked from commit 4e9fe6e3b7a72fc3b116403ea9b27e847b5bf186) --- doc/build/changelog/unreleased_14/7557.rst | 9 +++ lib/sqlalchemy/orm/strategies.py | 2 +- test/orm/inheritance/test_poly_loading.py | 85 ++++++++++++++++++++-- 3 files changed, 87 insertions(+), 9 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7557.rst diff --git a/doc/build/changelog/unreleased_14/7557.rst b/doc/build/changelog/unreleased_14/7557.rst new file mode 100644 index 00000000000..b7ccc87cf27 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7557.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, orm, regression + :tickets: 7557 + + Fixed regression which appeared in 1.4.23 which could cause loader options + to be mis-handled in some cases, in particular when using joined table + inheritance in combination with the ``polymorphic_load="selectin"`` option + as well as relationship lazy loading, leading to a ``TypeError``. 
+ diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index b6d74585d0b..71aae00807a 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -974,7 +974,7 @@ def _emit_lazyload( if state.load_options or (loadopt and loadopt._extra_criteria): effective_path = state.load_path[self.parent_property] - opts = list(state.load_options) + opts = tuple(state.load_options) if loadopt and loadopt._extra_criteria: use_get = False diff --git a/test/orm/inheritance/test_poly_loading.py b/test/orm/inheritance/test_poly_loading.py index 00537a1fc57..1a1838ad68b 100644 --- a/test/orm/inheritance/test_poly_loading.py +++ b/test/orm/inheritance/test_poly_loading.py @@ -1,18 +1,22 @@ from sqlalchemy import exc from sqlalchemy import ForeignKey +from sqlalchemy import inspect from sqlalchemy import Integer from sqlalchemy import select from sqlalchemy import String from sqlalchemy import testing from sqlalchemy.orm import backref from sqlalchemy.orm import defaultload +from sqlalchemy.orm import immediateload from sqlalchemy.orm import joinedload from sqlalchemy.orm import lazyload from sqlalchemy.orm import relationship from sqlalchemy.orm import selectin_polymorphic from sqlalchemy.orm import selectinload from sqlalchemy.orm import Session +from sqlalchemy.orm import subqueryload from sqlalchemy.orm import with_polymorphic +from sqlalchemy.orm.interfaces import CompileStateOption from sqlalchemy.sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL from sqlalchemy.testing import assertsql from sqlalchemy.testing import eq_ @@ -588,17 +592,11 @@ def insert_data(cls, connection): session.add_all([parent, subclass1, other]) session.commit() - def test_options_dont_pollute_baked(self): - self._test_options_dont_pollute(True) - - def test_options_dont_pollute_unbaked(self): - self._test_options_dont_pollute(False) - - def _test_options_dont_pollute(self, enable_baked): + def test_options_dont_pollute(self): Parent, 
ChildSubclass1, Other = self.classes( "Parent", "ChildSubclass1", "Other" ) - session = fixture_session(enable_baked_queries=enable_baked) + session = fixture_session() def no_opt(): q = session.query(Parent).options( @@ -854,3 +852,74 @@ def test_subclass_loadattr( ) asserter_.assert_(*expected) + + +class LazyLoaderTransfersOptsTest(fixtures.DeclarativeMappedTest): + """test #7557""" + + @classmethod + def setup_classes(cls): + Base = cls.DeclarativeBasic + + class Address(Base): + __tablename__ = "address" + + id = Column(Integer, primary_key=True) + user_id = Column(Integer, ForeignKey("user.id")) + address_type = Column(String(50)) + __mapper_args__ = { + "polymorphic_identity": "base_address", + "polymorphic_on": address_type, + } + + class EmailAddress(Address): + __tablename__ = "email_address" + email = Column(String(50)) + address_id = Column( + Integer, + ForeignKey(Address.id), + primary_key=True, + ) + + __mapper_args__ = { + "polymorphic_identity": "email", + "polymorphic_load": "selectin", + } + + class User(Base): + __tablename__ = "user" + + id = Column(Integer, primary_key=True) + name = Column(String(50)) + address = relationship(Address, uselist=False) + + @classmethod + def insert_data(cls, connection): + User, EmailAddress = cls.classes("User", "EmailAddress") + with Session(connection) as sess: + sess.add_all( + [User(name="u1", address=EmailAddress(email="foo", user_id=1))] + ) + + sess.commit() + + @testing.combinations( + None, selectinload, joinedload, lazyload, subqueryload, immediateload + ) + def test_opt_propagates(self, strat): + User, EmailAddress = self.classes("User", "EmailAddress") + sess = fixture_session() + + class AnyOpt(CompileStateOption): + _cache_key_traversal = () + propagate_to_loaders = True + + any_opt = AnyOpt() + if strat is None: + opts = (any_opt,) + else: + opts = (strat(User.address), any_opt) + + u = sess.execute(select(User).options(*opts)).scalars().one() + address = u.address + 
eq_(inspect(address).load_options, set(opts)) From e215db01d48c418e190936e6b36ea49c6eb22072 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 5 Jan 2022 12:20:46 -0500 Subject: [PATCH 083/632] implement second-level type resolution for literals Added additional rule to the system that determines ``TypeEngine`` implementations from Python literals to apply a second level of adjustment to the type, so that a Python datetime with or without tzinfo can set the ``timezone=True`` parameter on the returned :class:`.DateTime` object, as well as :class:`.Time`. This helps with some round-trip scenarios on type-sensitive PostgreSQL dialects such as asyncpg, psycopg3 (2.0 only). Improved support for asyncpg handling of TIME WITH TIMEZONE, which was not fully implemented. Fixes: #7537 Change-Id: Icdb07db85af5f7f39f1c1ef855fe27609770094b (cherry picked from commit 3b2e28bcb5ba32446a92b62b6862b7c11dabb592) --- doc/build/changelog/unreleased_14/7537.rst | 17 ++++++++++ lib/sqlalchemy/dialects/postgresql/asyncpg.py | 7 +++- lib/sqlalchemy/sql/sqltypes.py | 18 +++++++++- lib/sqlalchemy/sql/type_api.py | 11 +++++++ lib/sqlalchemy/testing/requirements.py | 33 +++++++++++++++++++ lib/sqlalchemy/testing/suite/test_types.py | 29 ++++++++++++++++ test/requirements.py | 25 ++++++++++++++ 7 files changed, 138 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7537.rst diff --git a/doc/build/changelog/unreleased_14/7537.rst b/doc/build/changelog/unreleased_14/7537.rst new file mode 100644 index 00000000000..d48cf30a077 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7537.rst @@ -0,0 +1,17 @@ +.. 
change:: + :tags: bug, sql, postgresql + :tickets: 7537 + + Added additional rule to the system that determines ``TypeEngine`` + implementations from Python literals to apply a second level of adjustment + to the type, so that a Python datetime with or without tzinfo can set the + ``timezone=True`` parameter on the returned :class:`.DateTime` object, as + well as :class:`.Time`. This helps with some round-trip scenarios on + type-sensitive PostgreSQL dialects such as asyncpg, psycopg3 (2.0 only). + +.. change:: + :tags: bug, postgresql, asyncpg + :tickets: 7537 + + Improved support for asyncpg handling of TIME WITH TIMEZONE, which + was not fully implemented. diff --git a/lib/sqlalchemy/dialects/postgresql/asyncpg.py b/lib/sqlalchemy/dialects/postgresql/asyncpg.py index fedc0b495b4..f32192b3c8b 100644 --- a/lib/sqlalchemy/dialects/postgresql/asyncpg.py +++ b/lib/sqlalchemy/dialects/postgresql/asyncpg.py @@ -136,7 +136,10 @@ class AsyncpgTime(sqltypes.Time): def get_dbapi_type(self, dbapi): - return dbapi.TIME + if self.timezone: + return dbapi.TIME_W_TZ + else: + return dbapi.TIME class AsyncpgDate(sqltypes.Date): @@ -818,6 +821,7 @@ def Binary(self, value): TIMESTAMP = util.symbol("TIMESTAMP") TIMESTAMP_W_TZ = util.symbol("TIMESTAMP_W_TZ") TIME = util.symbol("TIME") + TIME_W_TZ = util.symbol("TIME_W_TZ") DATE = util.symbol("DATE") INTERVAL = util.symbol("INTERVAL") NUMBER = util.symbol("NUMBER") @@ -843,6 +847,7 @@ def Binary(self, value): AsyncAdapt_asyncpg_dbapi.TIMESTAMP_W_TZ: "timestamp with time zone", AsyncAdapt_asyncpg_dbapi.DATE: "date", AsyncAdapt_asyncpg_dbapi.TIME: "time", + AsyncAdapt_asyncpg_dbapi.TIME_W_TZ: "time with time zone", AsyncAdapt_asyncpg_dbapi.INTERVAL: "interval", AsyncAdapt_asyncpg_dbapi.NUMBER: "numeric", AsyncAdapt_asyncpg_dbapi.FLOAT: "float", diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index 3f3801ab009..c80b10fcc34 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ 
-867,6 +867,13 @@ def __init__(self, timezone=False): def get_dbapi_type(self, dbapi): return dbapi.DATETIME + def _resolve_for_literal(self, value): + with_timezone = value.tzinfo is not None + if with_timezone and not self.timezone: + return DATETIME_TIMEZONE + else: + return self + @property def python_type(self): return dt.datetime @@ -937,6 +944,13 @@ def get_dbapi_type(self, dbapi): def python_type(self): return dt.time + def _resolve_for_literal(self, value): + with_timezone = value.tzinfo is not None + if with_timezone and not self.timezone: + return TIME_TIMEZONE + else: + return self + @util.memoized_property def _expression_adaptations(self): # Based on https://www.postgresql.org/docs/current/\ @@ -3254,6 +3268,8 @@ class MatchType(Boolean): INTEGERTYPE = Integer() MATCHTYPE = MatchType() TABLEVALUE = TableValueType() +DATETIME_TIMEZONE = DateTime(timezone=True) +TIME_TIMEZONE = Time(timezone=True) _type_map = { int: Integer(), @@ -3296,7 +3312,7 @@ def _resolve_value_to_type(value): ) return NULLTYPE else: - return _result_type + return _result_type._resolve_for_literal(value) # back-assign to type_api diff --git a/lib/sqlalchemy/sql/type_api.py b/lib/sqlalchemy/sql/type_api.py index 49f6cfe204a..ecf68e62dd4 100644 --- a/lib/sqlalchemy/sql/type_api.py +++ b/lib/sqlalchemy/sql/type_api.py @@ -545,6 +545,17 @@ def with_variant(self, type_, dialect_name): """ return Variant(self, {dialect_name: to_instance(type_)}) + def _resolve_for_literal(self, value): + """adjust this type given a literal Python value that will be + stored in a bound parameter. + + Used exclusively by _resolve_value_to_type(). + + .. 
versionadded:: 1.4.30 or 2.0 + + """ + return self + @util.memoized_property def _type_affinity(self): """Return a rudimental 'affinity' value expressing the general class diff --git a/lib/sqlalchemy/testing/requirements.py b/lib/sqlalchemy/testing/requirements.py index a0f262a760a..1c8858ec141 100644 --- a/lib/sqlalchemy/testing/requirements.py +++ b/lib/sqlalchemy/testing/requirements.py @@ -753,6 +753,29 @@ def datetime(self): return exclusions.open() + @property + def datetime_timezone(self): + """target dialect supports representation of Python + datetime.datetime() with tzinfo with DateTime(timezone=True).""" + + return exclusions.closed() + + @property + def time_timezone(self): + """target dialect supports representation of Python + datetime.time() with tzinfo with Time(timezone=True).""" + + return exclusions.closed() + + @property + def datetime_implicit_bound(self): + """target dialect when given a datetime object will bind it such + that the database server knows the object is a datetime, and not + a plain string. + + """ + return exclusions.open() + @property def datetime_microseconds(self): """target dialect supports representation of Python @@ -767,6 +790,16 @@ def timestamp_microseconds(self): if TIMESTAMP is used.""" return exclusions.closed() + @property + def timestamp_microseconds_implicit_bound(self): + """target dialect when given a datetime object which also includes + a microseconds portion when using the TIMESTAMP data type + will bind it such that the database server knows + the object is a datetime with microseconds, and not a plain string. + + """ + return self.timestamp_microseconds + @property def datetime_historic(self): """target dialect supports representation of Python diff --git a/lib/sqlalchemy/testing/suite/test_types.py b/lib/sqlalchemy/testing/suite/test_types.py index d62b608095a..2fdea5e48e7 100644 --- a/lib/sqlalchemy/testing/suite/test_types.py +++ b/lib/sqlalchemy/testing/suite/test_types.py @@ -41,6 +41,7 @@ from ... 
import util from ...orm import declarative_base from ...orm import Session +from ...util import compat from ...util import u @@ -308,6 +309,11 @@ class Decorated(TypeDecorator): Column("decorated_date_data", Decorated), ) + @testing.requires.datetime_implicit_bound + def test_select_direct(self, connection): + result = connection.scalar(select(literal(self.data))) + eq_(result, self.data) + def test_round_trip(self, connection): date_table = self.tables.date_table @@ -382,6 +388,15 @@ class DateTimeTest(_DateFixture, fixtures.TablesTest): data = datetime.datetime(2012, 10, 15, 12, 57, 18) +class DateTimeTZTest(_DateFixture, fixtures.TablesTest): + __requires__ = ("datetime_timezone",) + __backend__ = True + datatype = DateTime(timezone=True) + data = datetime.datetime( + 2012, 10, 15, 12, 57, 18, tzinfo=compat.timezone.utc + ) + + class DateTimeMicrosecondsTest(_DateFixture, fixtures.TablesTest): __requires__ = ("datetime_microseconds",) __backend__ = True @@ -395,6 +410,11 @@ class TimestampMicrosecondsTest(_DateFixture, fixtures.TablesTest): datatype = TIMESTAMP data = datetime.datetime(2012, 10, 15, 12, 57, 18, 396) + @testing.requires.timestamp_microseconds_implicit_bound + def test_select_direct(self, connection): + result = connection.scalar(select(literal(self.data))) + eq_(result, self.data) + class TimeTest(_DateFixture, fixtures.TablesTest): __requires__ = ("time",) @@ -403,6 +423,13 @@ class TimeTest(_DateFixture, fixtures.TablesTest): data = datetime.time(12, 57, 18) +class TimeTZTest(_DateFixture, fixtures.TablesTest): + __requires__ = ("time_timezone",) + __backend__ = True + datatype = Time(timezone=True) + data = datetime.time(12, 57, 18, tzinfo=compat.timezone.utc) + + class TimeMicrosecondsTest(_DateFixture, fixtures.TablesTest): __requires__ = ("time_microseconds",) __backend__ = True @@ -1424,6 +1451,7 @@ def test_string_cast_crit_against_string_basic(self): "JSONLegacyStringCastIndexTest", "DateTest", "DateTimeTest", + "DateTimeTZTest", 
"TextTest", "NumericTest", "IntegerTest", @@ -1433,6 +1461,7 @@ def test_string_cast_crit_against_string_basic(self): "TimeMicrosecondsTest", "TimestampMicrosecondsTest", "TimeTest", + "TimeTZTest", "DateTimeMicrosecondsTest", "DateHistoricTest", "StringTest", diff --git a/test/requirements.py b/test/requirements.py index 006c523a69d..bf83b83b48b 100644 --- a/test/requirements.py +++ b/test/requirements.py @@ -1126,6 +1126,27 @@ def datetime(self): return exclusions.open() + @property + def datetime_implicit_bound(self): + """target dialect when given a datetime object will bind it such + that the database server knows the object is a datetime, and not + a plain string. + + """ + # pg8000 works in main / 2.0, support in 1.4 is not fully + # present. + return exclusions.skip_if("postgresql+pg8000") + exclusions.fails_on( + ["mysql", "mariadb"] + ) + + @property + def datetime_timezone(self): + return exclusions.only_on("postgresql") + + @property + def time_timezone(self): + return exclusions.only_on("postgresql") + exclusions.skip_if("+pg8000") + @property def datetime_microseconds(self): """target dialect supports representation of Python @@ -1143,6 +1164,10 @@ def timestamp_microseconds(self): return only_on(["oracle"]) + @property + def timestamp_microseconds_implicit_bound(self): + return self.timestamp_microseconds + exclusions.fails_on(["oracle"]) + @property def datetime_historic(self): """target dialect supports representation of Python From 5278c84b35b8fd97dd51a49ff5ab089b4d0f997e Mon Sep 17 00:00:00 2001 From: long2ice Date: Fri, 14 Jan 2022 03:50:26 -0500 Subject: [PATCH 084/632] Remove pymysql in asyncmy Removed unnecessary dependency on PyMySQL from the asyncmy dialect. Pull request courtesy long2ice. 
Fixes: #7567 Closes: #7568 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/7568 Pull-request-sha: 48cbb3e3c249e431dfd91e88fcb3284af83671e5 Change-Id: Iad5048eaa5de6ad6666b2120df6608fd1bf50e02 (cherry picked from commit b4fe2b83ab3ce8cee1e2f4353dfcbea515b4f8d1) --- doc/build/changelog/unreleased_14/7567.rst | 7 +++++++ lib/sqlalchemy/dialects/mysql/asyncmy.py | 19 +++---------------- 2 files changed, 10 insertions(+), 16 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7567.rst diff --git a/doc/build/changelog/unreleased_14/7567.rst b/doc/build/changelog/unreleased_14/7567.rst new file mode 100644 index 00000000000..38fa6f39a14 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7567.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: bug, mysql + :tickets: 7567 + + Removed unnecessary dependency on PyMySQL from the asyncmy dialect. Pull + request courtesy long2ice. + diff --git a/lib/sqlalchemy/dialects/mysql/asyncmy.py b/lib/sqlalchemy/dialects/mysql/asyncmy.py index 9afd41bb3be..16981fd98d9 100644 --- a/lib/sqlalchemy/dialects/mysql/asyncmy.py +++ b/lib/sqlalchemy/dialects/mysql/asyncmy.py @@ -229,9 +229,8 @@ class AsyncAdaptFallback_asyncmy_connection(AsyncAdapt_asyncmy_connection): class AsyncAdapt_asyncmy_dbapi: - def __init__(self, asyncmy, pymysql): + def __init__(self, asyncmy): self.asyncmy = asyncmy - self.pymysql = pymysql self.paramstyle = "format" self._init_dbapi_attributes() @@ -251,16 +250,6 @@ def _init_dbapi_attributes(self): ): setattr(self, name, getattr(self.asyncmy.errors, name)) - for name in ( - "NUMBER", - "STRING", - "DATETIME", - "BINARY", - "TIMESTAMP", - "Binary", - ): - setattr(self, name, getattr(self.pymysql, name)) - def connect(self, *arg, **kw): async_fallback = kw.pop("async_fallback", False) @@ -287,9 +276,7 @@ class MySQLDialect_asyncmy(MySQLDialect_pymysql): @classmethod def dbapi(cls): - return AsyncAdapt_asyncmy_dbapi( - __import__("asyncmy"), __import__("pymysql") - ) + return 
AsyncAdapt_asyncmy_dbapi(__import__("asyncmy")) @classmethod def get_pool_class(cls, url): @@ -318,7 +305,7 @@ def is_disconnect(self, e, connection, cursor): ) def _found_rows_client_flag(self): - from pymysql.constants import CLIENT + from asyncmy.constants import CLIENT return CLIENT.FOUND_ROWS From 5483ccbc728c92a4786f739bdd73325479aa2c55 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jose=CC=81=20Duarte?= Date: Thu, 13 Jan 2022 17:20:06 -0500 Subject: [PATCH 085/632] Fixes(#7561) Add support for postgres.UUID literal_binds compilation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Added string rendering to the :class:`.postgresql.UUID` datatype, so that stringifying a statement with "literal_binds" that uses this type will render an appropriate string value for the PostgreSQL backend. Pull request courtesy José Duarte. Fixes: #7561 Closes: #7563 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/7563 Pull-request-sha: cf6fe73265342d7884a940c4b3a34c9552113ec3 Change-Id: I4b162bdcdce2293a90683e36da54e4a891a3c684 (cherry picked from commit 17d228f6268515bbf37fdd70a6ee3a62cb9a0b0c) --- doc/build/changelog/unreleased_14/7561.rst | 8 +++++ lib/sqlalchemy/dialects/postgresql/base.py | 19 ++++++++++- test/dialect/postgresql/test_types.py | 38 +++++++++++++++++++--- 3 files changed, 60 insertions(+), 5 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7561.rst diff --git a/doc/build/changelog/unreleased_14/7561.rst b/doc/build/changelog/unreleased_14/7561.rst new file mode 100644 index 00000000000..18ea1106318 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7561.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: usecase, postgresql + :tickets: 7561 + + Added string rendering to the :class:`.postgresql.UUID` datatype, so that + stringifying a statement with "literal_binds" that uses this type will + render an appropriate string value for the PostgreSQL backend. Pull request + courtesy José Duarte. 
diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index 04b79a7338e..ea31a355266 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -1409,7 +1409,6 @@ def bind_expression(self, bindvalue): from ...types import TEXT from ...types import VARCHAR - IDX_USING = re.compile(r"^(?:btree|hash|gist|gin|[\w_]+)$", re.I) AUTOCOMMIT_REGEXP = re.compile( @@ -1756,6 +1755,24 @@ def process(value): else: return None + def literal_processor(self, dialect): + if self.as_uuid: + + def process(value): + if value is not None: + value = "'%s'::UUID" % value + return value + + return process + else: + + def process(value): + if value is not None: + value = "'%s'" % value + return value + + return process + PGUuid = UUID diff --git a/test/dialect/postgresql/test_types.py b/test/dialect/postgresql/test_types.py index bbd5cadda12..e5b9d48676c 100644 --- a/test/dialect/postgresql/test_types.py +++ b/test/dialect/postgresql/test_types.py @@ -47,6 +47,7 @@ from sqlalchemy.exc import CompileError from sqlalchemy.orm import declarative_base from sqlalchemy.orm import Session +from sqlalchemy.sql import bindparam from sqlalchemy.sql import operators from sqlalchemy.sql import sqltypes from sqlalchemy.sql.type_api import Variant @@ -922,11 +923,11 @@ class NumericInterpretationTest(fixtures.TestBase): def test_numeric_codes(self): from sqlalchemy.dialects.postgresql import ( + base, pg8000, pygresql, psycopg2, psycopg2cffi, - base, ) dialects = ( @@ -1415,9 +1416,11 @@ def test_array_agg_generic(self): argnames="with_enum, using_aggregate_order_by", ) def test_array_agg_specific(self, with_enum, using_aggregate_order_by): - from sqlalchemy.dialects.postgresql import aggregate_order_by - from sqlalchemy.dialects.postgresql import array_agg - from sqlalchemy.dialects.postgresql import ENUM + from sqlalchemy.dialects.postgresql import ( + ENUM, + aggregate_order_by, + array_agg, + ) 
element_type = ENUM if with_enum else Integer expr = ( @@ -2790,6 +2793,33 @@ def test_round_trip(self, datatype, value1, value2, connection): def test_uuid_array(self, datatype, value1, value2, connection): self.test_round_trip(datatype, value1, value2, connection) + @testing.combinations( + ( + "not_as_uuid", + postgresql.UUID(as_uuid=False), + str(uuid.uuid4()), + ), + ( + "as_uuid", + postgresql.UUID(as_uuid=True), + uuid.uuid4(), + ), + id_="iaa", + argnames="datatype, value1", + ) + def test_uuid_literal(self, datatype, value1, connection): + v1 = connection.execute( + select( + bindparam( + "key", + value=value1, + literal_execute=True, + type_=datatype, + ) + ), + ) + eq_(v1.fetchone()[0], value1) + class HStoreTest(AssertsCompiledSQL, fixtures.TestBase): __dialect__ = "postgresql" From 16c88b254454cc20cc1a92a23a8d290b8155bbda Mon Sep 17 00:00:00 2001 From: Jaen Saul Date: Fri, 19 Nov 2021 22:12:44 +0200 Subject: [PATCH 086/632] fix(mypy): Placeholder vars should have fullname set. Otherwise the dmypy daemon will crash on an incremental re-run. Fixes: #7347 Closes: #7348 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/7348 Pull-request-sha: 79eefa3417e09a8f9defaeafdb9f76d323385238 Change-Id: Id2133e837ee5dcf43461af51458e296353bdad6c (cherry picked from commit 7c170ade108b907c4ea6c7a73cc606afd1838885) --- doc/build/changelog/unreleased_14/7347.rst | 6 ++++++ lib/sqlalchemy/ext/mypy/apply.py | 1 + 2 files changed, 7 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7347.rst diff --git a/doc/build/changelog/unreleased_14/7347.rst b/doc/build/changelog/unreleased_14/7347.rst new file mode 100644 index 00000000000..f259112fd90 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7347.rst @@ -0,0 +1,6 @@ +.. change:: + :tags: bug, mypy + :tickets: 7321 + + Fixed Mypy crash when running id daemon mode caused by a + missing attribute on an internal mypy ``Var`` instance. 
diff --git a/lib/sqlalchemy/ext/mypy/apply.py b/lib/sqlalchemy/ext/mypy/apply.py index b3af0560c60..99be194cdcc 100644 --- a/lib/sqlalchemy/ext/mypy/apply.py +++ b/lib/sqlalchemy/ext/mypy/apply.py @@ -293,6 +293,7 @@ def _apply_placeholder_attr_to_class( else: type_ = AnyType(TypeOfAny.special_form) var = Var(attrname) + var._fullname = cls.fullname + "." + attrname var.info = cls.info var.type = type_ cls.info.names[attrname] = SymbolTableNode(MDEF, var) From e542448ac9a9cb315a010a8973e58ee1157b91ef Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 18 Jan 2022 11:02:57 -0500 Subject: [PATCH 087/632] detect map_imperatively() called twice Fixed issue where calling upon :meth:`_orm.regsitry.map_imperatively` more than once for the same class would produce an unexpected error, rather than an informative error that the target class is already mapped. This behavior differed from that of the :func:`_orm.mapper` function which does report an informative message already. For 2.0, this change also cleans up the logic that detects against `Mapper()` or `_mapper()` being invoked directly. 1.4's backport will take on a different format as `mapper()` is still public API in that release. Fixes: #7579 Change-Id: Ie74a1a2e97f8b6a81ac1942040edd8cae82f4bd8 (cherry picked from commit e6ded82eef63235d7cbfe3ab3382a48f32913640) --- doc/build/changelog/unreleased_14/7579.rst | 9 ++++++ lib/sqlalchemy/orm/decl_api.py | 6 +++- lib/sqlalchemy/orm/mapper.py | 5 ++++ test/orm/test_mapper.py | 35 ++++++++++++++++++++++ 4 files changed, 54 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/7579.rst diff --git a/doc/build/changelog/unreleased_14/7579.rst b/doc/build/changelog/unreleased_14/7579.rst new file mode 100644 index 00000000000..3cf6c5ba788 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7579.rst @@ -0,0 +1,9 @@ +.. 
change:: + :tags: bug, orm + :tickets: 7579 + + Fixed issue where calling upon :meth:`_orm.regsitry.map_imperatively` more + than once for the same class would produce an unexpected error, rather than + an informative error that the target class is already mapped. This behavior + differed from that of the :func:`_orm.mapper` function which does report an + informative message already. diff --git a/lib/sqlalchemy/orm/decl_api.py b/lib/sqlalchemy/orm/decl_api.py index 452f2eaf533..42419e48cde 100644 --- a/lib/sqlalchemy/orm/decl_api.py +++ b/lib/sqlalchemy/orm/decl_api.py @@ -644,7 +644,11 @@ def _dispose_cls(self, cls): def _add_manager(self, manager): self._managers[manager] = True - assert manager.registry is None + if manager.registry is not None and manager.is_mapped: + raise exc.ArgumentError( + "Class '%s' already has a primary mapper defined. " + % manager.class_ + ) manager.registry = self def configure(self, cascade=False): diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index 984e4de979a..b12ade59c33 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -1230,6 +1230,11 @@ def _configure_class_instrumentation(self): if manager is not None: assert manager.class_ is self.class_ if manager.is_mapped: + # changed in #7579: + # this message is defined in two places as of this change, + # also in decl_api -> _add_manager(). in 2.0, this codepath + # is removed as any calls to mapper() / Mapper without + # the registry setting up first will be rejected. raise sa_exc.ArgumentError( "Class '%s' already has a primary mapper defined. 
" % self.class_ diff --git a/test/orm/test_mapper.py b/test/orm/test_mapper.py index 0f84923ac85..11a762e60b1 100644 --- a/test/orm/test_mapper.py +++ b/test/orm/test_mapper.py @@ -23,6 +23,8 @@ from sqlalchemy.orm import dynamic_loader from sqlalchemy.orm import Load from sqlalchemy.orm import load_only +from sqlalchemy.orm import Mapper +from sqlalchemy.orm import mapper from sqlalchemy.orm import reconstructor from sqlalchemy.orm import registry from sqlalchemy.orm import relationship @@ -33,6 +35,7 @@ from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_deprecated_20 from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ @@ -114,6 +117,38 @@ class Plain(ComparableMixin): foobar="x", ) + def test_class_already_mapped(self): + users, User = ( + self.tables.users, + self.classes.User, + ) + + self.mapper(User, users) + + with expect_raises_message( + sa.exc.ArgumentError, + "Class .*User.* already has a primary mapper defined", + ): + self.mapper(User, users) + + @testing.combinations(mapper, Mapper) + def test_class_already_mapped_legacy(self, fn): + users, User = ( + self.tables.users, + self.classes.User, + ) + + with expect_deprecated_20( + r"Calling the mapper\(\) function directly outside" + ): + fn(User, users) + + with expect_raises_message( + sa.exc.ArgumentError, + "Class .*User.* already has a primary mapper defined", + ): + fn(User, users) + def test_prop_shadow(self): """A backref name may not shadow an existing property name.""" From 28dfa5cee9d55e1cda2977c840b0c41b8d5de842 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 16 Jan 2022 10:21:45 -0500 Subject: [PATCH 088/632] enhance double-aliased table logic to handle more cases Fixed ORM regression where calling the :func:`_orm.aliased` function against an existing :func:`_orm.aliased` construct would 
fail to produce correct SQL if the existing construct were against a fixed table. The fix allows that the original :func:`_orm.aliased` construct is disregarded if it were only against a table that's now being replaced. It also allows for correct behavior when constructing a :func:`_orm.aliased` without a selectable argument against a :func:`_orm.aliased` that's against a subuquery, to create an alias of that subquery (i.e. to change its name). The nesting behavior of :func:`_orm.aliased` remains in place for the case where the outer :func:`_orm.aliased` object is against a subquery which in turn refers to the inner :func:`_orm.aliased` object. This is a relatively new 1.4 feature that helps to suit use cases that were previously served by the deprecated ``Query.from_self()`` method. Fixes: #7576 Change-Id: Ia9ca606f6300e38b6040eb6fc7facfe97c8cf057 (cherry picked from commit bb11d5b7c2256861fdfe64f5cded94ce15266132) --- doc/build/changelog/unreleased_14/7576.rst | 18 +++ lib/sqlalchemy/orm/util.py | 24 ++- test/orm/test_froms.py | 162 +++++++++++++++++++++ 3 files changed, 199 insertions(+), 5 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7576.rst diff --git a/doc/build/changelog/unreleased_14/7576.rst b/doc/build/changelog/unreleased_14/7576.rst new file mode 100644 index 00000000000..74d8ac49422 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7576.rst @@ -0,0 +1,18 @@ +.. change:: + :tags: bug, orm, regression + :tickets: 7576 + + Fixed ORM regression where calling the :func:`_orm.aliased` function + against an existing :func:`_orm.aliased` construct would fail to produce + correct SQL if the existing construct were against a fixed table. The fix + allows that the original :func:`_orm.aliased` construct is disregarded if + it were only against a table that's now being replaced. 
It also allows for + correct behavior when constructing a :func:`_orm.aliased` without a + selectable argument against a :func:`_orm.aliased` that's against a + subuquery, to create an alias of that subquery (i.e. to change its name). + + The nesting behavior of :func:`_orm.aliased` remains in place for the case + where the outer :func:`_orm.aliased` object is against a subquery which in + turn refers to the inner :func:`_orm.aliased` object. This is a relatively + new 1.4 feature that helps to suit use cases that were previously served by + the deprecated ``Query.from_self()`` method. diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index c574a39a4bd..d90d44ce900 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -494,11 +494,20 @@ def __init__( insp = inspection.inspect(mapped_class_or_ac) mapper = insp.mapper + nest_adapters = False + if alias is None: - alias = mapper._with_polymorphic_selectable._anonymous_fromclause( - name=name, - flat=flat, - ) + if insp.is_aliased_class and insp.selectable._is_subquery: + alias = insp.selectable.alias() + else: + alias = ( + mapper._with_polymorphic_selectable._anonymous_fromclause( + name=name, + flat=flat, + ) + ) + elif insp.is_aliased_class: + nest_adapters = True self._aliased_insp = AliasedInsp( self, @@ -515,6 +524,7 @@ def __init__( use_mapper_path, adapt_on_names, represents_outer_join, + nest_adapters, ) self.__name__ = "AliasedClass_%s" % mapper.class_.__name__ @@ -651,6 +661,7 @@ def __init__( _use_mapper_path, adapt_on_names, represents_outer_join, + nest_adapters, ): mapped_class_or_ac = inspected.entity @@ -666,6 +677,7 @@ def __init__( self._base_alias = weakref.ref(_base_alias or self) self._use_mapper_path = _use_mapper_path self.represents_outer_join = represents_outer_join + self._nest_adapters = nest_adapters if with_polymorphic_mappers: self._is_with_polymorphic = True @@ -701,7 +713,7 @@ def __init__( ], ) - if inspected.is_aliased_class: + if 
nest_adapters: self._adapter = inspected._adapter.wrap(self._adapter) self._adapt_on_names = adapt_on_names @@ -772,6 +784,7 @@ def __getstate__(self): "base_alias": self._base_alias(), "use_mapper_path": self._use_mapper_path, "represents_outer_join": self.represents_outer_join, + "nest_adapters": self._nest_adapters, } def __setstate__(self, state): @@ -786,6 +799,7 @@ def __setstate__(self, state): state["use_mapper_path"], state["adapt_on_names"], state["represents_outer_join"], + state["nest_adapters"], ) def _adapt_element(self, elem, key=None): diff --git a/test/orm/test_froms.py b/test/orm/test_froms.py index 6e1c94e12f5..9585da125b4 100644 --- a/test/orm/test_froms.py +++ b/test/orm/test_froms.py @@ -573,6 +573,16 @@ def test_aliases(self): q = s.query(uq1.name, uq2.name).order_by(uq1.name, uq2.name) + self.assert_compile( + q, + "SELECT anon_1.name AS anon_1_name, anon_1.name_1 AS " + "anon_1_name_1 FROM " + "(SELECT users.id AS id, users.name AS name, users_1.id AS id_1, " + "users_1.name AS name_1 FROM users, users AS users_1 " + "WHERE users.id > users_1.id) AS anon_1 " + "ORDER BY anon_1.name, anon_1.name_1", + ) + eq_( q.all(), [ @@ -613,6 +623,158 @@ def test_aliases(self): ], ) + def test_nested_aliases_none_to_none(self): + """test #7576""" + + User = self.classes.User + + u1 = aliased(User) + u2 = aliased(u1) + + self.assert_compile( + select(u2), "SELECT users_1.id, users_1.name FROM users AS users_1" + ) + + def test_nested_alias_none_to_subquery(self): + """test #7576""" + + User = self.classes.User + + subq = select(User.id, User.name).subquery() + + u1 = aliased(User, subq) + + self.assert_compile( + select(u1), + "SELECT anon_1.id, anon_1.name FROM " + "(SELECT users.id AS id, users.name AS name FROM users) AS anon_1", + ) + + u2 = aliased(u1) + + self.assert_compile( + select(u2), + "SELECT anon_1.id, anon_1.name FROM " + "(SELECT users.id AS id, users.name AS name FROM users) AS anon_1", + ) + + def 
test_nested_alias_subquery_to_subquery_w_replace(self): + """test #7576""" + + User = self.classes.User + + subq = select(User.id, User.name).subquery() + + u1 = aliased(User, subq) + + self.assert_compile( + select(u1), + "SELECT anon_1.id, anon_1.name FROM " + "(SELECT users.id AS id, users.name AS name FROM users) AS anon_1", + ) + + u2 = aliased(u1, subq) + + self.assert_compile( + select(u2), + "SELECT anon_1.id, anon_1.name FROM " + "(SELECT users.id AS id, users.name AS name FROM users) AS anon_1", + ) + + def test_nested_alias_subquery_to_subquery_w_adaption(self): + """test #7576""" + + User = self.classes.User + + inner_subq = select(User.id, User.name).subquery() + + u1 = aliased(User, inner_subq) + + self.assert_compile( + select(u1), + "SELECT anon_1.id, anon_1.name FROM " + "(SELECT users.id AS id, users.name AS name FROM users) AS anon_1", + ) + + outer_subq = select(u1.id, u1.name).subquery() + + u2 = aliased(u1, outer_subq) + + self.assert_compile( + select(u2), + "SELECT anon_1.id, anon_1.name FROM " + "(SELECT anon_2.id AS id, anon_2.name AS name FROM " + "(SELECT users.id AS id, users.name AS name FROM users) " + "AS anon_2) AS anon_1", + ) + + outer_subq = ( + select(u1.id, u1.name, User.id, User.name) + .where(u1.id > User.id) + .subquery() + ) + u2 = aliased(u1, outer_subq) + + # query here is: + # SELECT derived_from_inner_subq.id, derived_from_inner_subq.name + # FROM ( + # SELECT ... 
FROM inner_subq, users WHERE inner_subq.id > users.id + # ) as outer_subq + self.assert_compile( + select(u2), + "SELECT anon_1.id, anon_1.name FROM " + "(SELECT anon_2.id AS id, anon_2.name AS name, users.id AS id_1, " + "users.name AS name_1 FROM " + "(SELECT users.id AS id, users.name AS name FROM users) " + "AS anon_2, users " + "WHERE anon_2.id > users.id) AS anon_1", + ) + + def test_nested_alias_subquery_w_alias_to_none(self): + """test #7576""" + + User = self.classes.User + + u1 = aliased(User) + + self.assert_compile( + select(u1), "SELECT users_1.id, users_1.name FROM users AS users_1" + ) + + subq = ( + select(User.id, User.name, u1.id, u1.name) + .where(User.id > u1.id) + .subquery() + ) + + # aliased against aliased w/ subquery means, look for u1 inside the + # given subquery. adapt that. + u2 = aliased(u1, subq) + + self.assert_compile( + select(u2), + "SELECT anon_1.id_1, anon_1.name_1 FROM " + "(SELECT users.id AS id, users.name AS name, " + "users_1.id AS id_1, users_1.name AS name_1 " + "FROM users, users AS users_1 " + "WHERE users.id > users_1.id) AS anon_1", + ) + + subq = select(User.id, User.name).subquery() + u2 = aliased(u1, subq) + + # given that, it makes sense that if we remove "u1" from the subquery, + # we get a second FROM element like below. + # this is actually a form of the "wrong" query that was + # reported in #7576, but this is the case where we have a subquery, + # so yes, we need to adapt the "inner" alias to it. 
+ + self.assert_compile( + select(u2), + "SELECT users_1.id, users_1.name FROM users AS users_1, " + "(SELECT users.id AS id, users.name AS name FROM users) AS anon_1", + ) + def test_multiple_entities(self): User, Address = self.classes.User, self.classes.Address From 692d05a5cc15cf3947055e9089b86ed99be3da46 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 18 Jan 2022 17:19:24 -0500 Subject: [PATCH 089/632] reject methods as lambda SQL callables Added an informative error message when a method object is passed to a SQL construct. Previously, when such a callable were passed, as is a common typographical error when dealing with method-chained SQL constructs, they were interpreted as "lambda SQL" targets to be invoked at compilation time, which would lead to silent failures. As this feature was not intended to be used with methods, method objects are now rejected. Fixes: #7032 Change-Id: If714715bd8c11557ab769ee3b1a24264b0b06acc (cherry picked from commit e28ec27b599558b3e26ced106a972e8b4aa9e668) --- doc/build/changelog/unreleased_14/7032.rst | 10 ++++++++++ lib/sqlalchemy/sql/lambdas.py | 5 +++++ test/sql/test_lambdas.py | 21 +++++++++++++++++++++ 3 files changed, 36 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7032.rst diff --git a/doc/build/changelog/unreleased_14/7032.rst b/doc/build/changelog/unreleased_14/7032.rst new file mode 100644 index 00000000000..c837be49446 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7032.rst @@ -0,0 +1,10 @@ +.. change:: + :tags: bug, sql + :tickets: 7032 + + Added an informative error message when a method object is passed to a SQL + construct. Previously, when such a callable were passed, as is a common + typographical error when dealing with method-chained SQL constructs, they + were interpreted as "lambda SQL" targets to be invoked at compilation time, + which would lead to silent failures. As this feature was not intended to be + used with methods, method objects are now rejected. 
diff --git a/lib/sqlalchemy/sql/lambdas.py b/lib/sqlalchemy/sql/lambdas.py index e22f8716710..5f91559987d 100644 --- a/lib/sqlalchemy/sql/lambdas.py +++ b/lib/sqlalchemy/sql/lambdas.py @@ -5,6 +5,7 @@ # This module is part of SQLAlchemy and is released under # the MIT License: https://www.opensource.org/licenses/mit-license.php +import inspect import itertools import operator import sys @@ -619,6 +620,10 @@ def get(cls, fn, lambda_element, lambda_kw, **kw): return analyzed def __init__(self, fn, lambda_element, opts): + if inspect.ismethod(fn): + raise exc.ArgumentError( + "Method %s may not be passed as a SQL expression" % fn + ) closure = fn.__closure__ self.track_bound_values = ( diff --git a/test/sql/test_lambdas.py b/test/sql/test_lambdas.py index 76be0af3cea..29e1258efbf 100644 --- a/test/sql/test_lambdas.py +++ b/test/sql/test_lambdas.py @@ -8,6 +8,7 @@ from sqlalchemy.sql import bindparam from sqlalchemy.sql import coercions from sqlalchemy.sql import column +from sqlalchemy.sql import func from sqlalchemy.sql import join from sqlalchemy.sql import lambda_stmt from sqlalchemy.sql import lambdas @@ -38,6 +39,26 @@ class LambdaElementTest( ): __dialect__ = "default" + def test_reject_methods(self): + """test #7032""" + + t1 = table("t1", column("q"), column("p")) + + subq = select(t1).subquery + + with expect_raises_message( + exc.ArgumentError, + "Method Date: Wed, 19 Jan 2022 14:48:51 -0500 Subject: [PATCH 090/632] typo Change-Id: I65cdd4cefdfacb1506c8e32a11c44ff650cd15b6 (cherry picked from commit b235a8cb1042cb7706dddc0f783cabb81e141055) --- doc/build/changelog/unreleased_14/7579.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/changelog/unreleased_14/7579.rst b/doc/build/changelog/unreleased_14/7579.rst index 3cf6c5ba788..01eea6dacf8 100644 --- a/doc/build/changelog/unreleased_14/7579.rst +++ b/doc/build/changelog/unreleased_14/7579.rst @@ -2,7 +2,7 @@ :tags: bug, orm :tickets: 7579 - Fixed issue where calling upon 
:meth:`_orm.regsitry.map_imperatively` more + Fixed issue where calling upon :meth:`_orm.registry.map_imperatively` more than once for the same class would produce an unexpected error, rather than an informative error that the target class is already mapped. This behavior differed from that of the :func:`_orm.mapper` function which does report an From 1323b436d8612a2684908f30e35a38eb9d74a9f2 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 19 Jan 2022 14:31:52 -0500 Subject: [PATCH 091/632] Add AdaptedConnection.run_async Added new method :meth:`.AdaptedConnection.run_async` to the DBAPI connection interface used by asyncio drivers, which allows methods to be called against the underlying "driver" connection directly within a sync-style function where the ``await`` keyword can't be used, such as within SQLAlchemy event handler functions. The method is analogous to the :meth:`_asyncio.AsyncConnection.run_sync` method which translates async-style calls to sync-style. The method is useful for things like connection-pool on-connect handlers that need to invoke awaitable methods on the driver connection when it's first created. Fixes: #7580 Change-Id: I03c98a72bda0234deb19c00095b31a36f19bf36d (cherry picked from commit 09ad975505adb2118f229cb5b1a75c2c412420ae) --- doc/build/changelog/unreleased_14/7580.rst | 18 +++++++ doc/build/orm/extensions/asyncio.rst | 57 ++++++++++++++++++++-- lib/sqlalchemy/engine/interfaces.py | 30 ++++++++++++ test/ext/asyncio/test_engine_py3k.py | 25 ++++++++++ 4 files changed, 127 insertions(+), 3 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7580.rst diff --git a/doc/build/changelog/unreleased_14/7580.rst b/doc/build/changelog/unreleased_14/7580.rst new file mode 100644 index 00000000000..fa02085b2bc --- /dev/null +++ b/doc/build/changelog/unreleased_14/7580.rst @@ -0,0 +1,18 @@ +.. 
change:: + :tags: usecase, asyncio + :tickets: 7580 + + Added new method :meth:`.AdaptedConnection.run_async` to the DBAPI + connection interface used by asyncio drivers, which allows methods to be + called against the underlying "driver" connection directly within a + sync-style function where the ``await`` keyword can't be used, such as + within SQLAlchemy event handler functions. The method is analogous to the + :meth:`_asyncio.AsyncConnection.run_sync` method which translates + async-style calls to sync-style. The method is useful for things like + connection-pool on-connect handlers that need to invoke awaitable methods + on the driver connection when it's first created. + + .. seealso:: + + :ref:`asyncio_events_run_async` + diff --git a/doc/build/orm/extensions/asyncio.rst b/doc/build/orm/extensions/asyncio.rst index a7d2fb16be7..0851c529681 100644 --- a/doc/build/orm/extensions/asyncio.rst +++ b/doc/build/orm/extensions/asyncio.rst @@ -622,12 +622,63 @@ The above example prints something along the lines of:: to sync, and outgoing messages to the database API will be converted to asyncio transparently. +.. _asyncio_events_run_async: + +Using awaitable-only driver methods in connection pool and other events +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +As discussed in the above section, event handlers such as those oriented +around the :class:`.PoolEvents` event handlers receive a sync-style "DBAPI" connection, +which is a wrapper object supplied by SQLAlchemy asyncio dialects to adapt +the underlying asyncio "driver" connection into one that can be used by +SQLAlchemy's internals. A special use case arises when the user-defined +implementation for such an event handler needs to make use of the +ultimate "driver" connection directly, using awaitable only methods on that +driver connection. One such example is the ``.set_type_codec()`` method +supplied by the asyncpg driver. 
+ +To accommodate this use case, SQLAlchemy's :class:`.AdaptedConnection` +class provides a method :meth:`.AdaptedConnection.run_async` that allows +an awaitable function to be invoked within the "synchronous" context of +an event handler or other SQLAlchemy internal. This method is directly +analogous to the :meth:`_asyncio.AsyncConnection.run_sync` method that +allows a sync-style method to run under async. + +:meth:`.AdaptedConnection.run_async` should be passed a function that will +accept the innermost "driver" connection as a single argument, and return +an awaitable that will be invoked by the :meth:`.AdaptedConnection.run_async` +method. The given function itself does not need to be declared as ``async``; +it's perfectly fine for it to be a Python ``lambda:``, as the return awaitable +value will be invoked after being returned:: + + from sqlalchemy.ext.asyncio import create_async_engine + from sqlalchemy import event + + engine = create_async_engine(...) + + @event.listens_for(engine.sync_engine, "connect") + def register_custom_types(dbapi_connection, ...): + dbapi_connection.run_async( + lambda connection: connection.set_type_codec('MyCustomType', encoder, decoder, ...) + ) + +Above, the object passed to the ``register_custom_types`` event handler +is an instance of :class:`.AdaptedConnection`, which provides a DBAPI-like +interface to an underlying async-only driver-level connection object. +The :meth:`.AdaptedConnection.run_async` method then provides access to an +awaitable environment where the underlying driver level connection may be +acted upon. + +.. versionadded:: 1.4.30 + + Using multiple asyncio event loops ---------------------------------- -An application that makes use of multiple event loops, for example by combining asyncio -with multithreading, should not share the same :class:`_asyncio.AsyncEngine` -with different event loops when using the default pool implementation. 
+An application that makes use of multiple event loops, for example in the +uncommon case of combining asyncio with multithreading, should not share the +same :class:`_asyncio.AsyncEngine` with different event loops when using the +default pool implementation. If an :class:`_asyncio.AsyncEngine` is be passed from one event loop to another, the method :meth:`_asyncio.AsyncEngine.dispose()` should be called before it's diff --git a/lib/sqlalchemy/engine/interfaces.py b/lib/sqlalchemy/engine/interfaces.py index 0bfd8fb8b59..e86fa2b6e78 100644 --- a/lib/sqlalchemy/engine/interfaces.py +++ b/lib/sqlalchemy/engine/interfaces.py @@ -10,6 +10,7 @@ from .. import util from ..sql.compiler import Compiled # noqa from ..sql.compiler import TypeCompiler # noqa +from ..util.concurrency import await_only class Dialect(object): @@ -1752,5 +1753,34 @@ def driver_connection(self): """The connection object as returned by the driver after a connect.""" return self._connection + def run_async(self, fn): + """Run the awaitable returned by the given function, which is passed + the raw asyncio driver connection. + + This is used to invoke awaitable-only methods on the driver connection + within the context of a "synchronous" method, like a connection + pool event handler. + + E.g.:: + + engine = create_async_engine(...) + + @event.listens_for(engine.sync_engine, "connect") + def register_custom_types(dbapi_connection, ...): + dbapi_connection.run_async( + lambda connection: connection.set_type_codec( + 'MyCustomType', encoder, decoder, ... + ) + ) + + .. versionadded:: 1.4.30 + + .. 
seealso:: + + :ref:`asyncio_events_run_async` + + """ + return await_only(fn(self._connection)) + def __repr__(self): return "" % self._connection diff --git a/test/ext/asyncio/test_engine_py3k.py b/test/ext/asyncio/test_engine_py3k.py index bd07bba0db6..e88ef5464e8 100644 --- a/test/ext/asyncio/test_engine_py3k.py +++ b/test/ext/asyncio/test_engine_py3k.py @@ -1,4 +1,5 @@ import asyncio +import inspect as stdlib_inspect from sqlalchemy import Column from sqlalchemy import create_engine @@ -220,6 +221,30 @@ def test_proxied_attrs_engine(self, async_engine): eq_(async_engine.driver, sync_engine.driver) eq_(async_engine.echo, sync_engine.echo) + @async_test + async def test_run_async(self, async_engine): + async def test_meth(async_driver_connection): + # there's no method that's guaranteed to be on every + # driver, so just stringify it and compare that to the + # outside + return str(async_driver_connection) + + def run_sync_to_async(connection): + connection_fairy = connection.connection + async_return = connection_fairy.run_async( + lambda driver_connection: test_meth(driver_connection) + ) + assert not stdlib_inspect.iscoroutine(async_return) + return async_return + + async with async_engine.connect() as conn: + driver_connection = ( + await conn.get_raw_connection() + ).driver_connection + res = await conn.run_sync(run_sync_to_async) + assert not stdlib_inspect.iscoroutine(res) + eq_(res, str(driver_connection)) + @async_test async def test_engine_eq_ne(self, async_engine): e2 = _async_engine.AsyncEngine(async_engine.sync_engine) From 87c11c3162255ce9eb550b86fb46772b8c19e7dc Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Wed, 19 Jan 2022 22:10:09 +0100 Subject: [PATCH 092/632] Added missing method ``invalidate` in the `AsyncSession` Fixes: #7524 Change-Id: I20387e6700015c44f23bd2d05347bdce802196c0 (cherry picked from commit e5606c0ba8bf2fed6daa7a488433d55ddbf302e9) --- doc/build/changelog/unreleased_14/7524.rst | 7 +++++++ 
lib/sqlalchemy/ext/asyncio/scoping.py | 1 + lib/sqlalchemy/ext/asyncio/session.py | 7 +++++++ test/ext/asyncio/test_session_py3k.py | 17 +++++++++++++++++ 4 files changed, 32 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7524.rst diff --git a/doc/build/changelog/unreleased_14/7524.rst b/doc/build/changelog/unreleased_14/7524.rst new file mode 100644 index 00000000000..68ceefd67aa --- /dev/null +++ b/doc/build/changelog/unreleased_14/7524.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: bug, orm, asyncio + :tickets: 7524 + + Added missing method :meth:`_asyncio.AsyncSession.invalidate` to the + :class:`_asyncio.AsyncSession` class. + diff --git a/lib/sqlalchemy/ext/asyncio/scoping.py b/lib/sqlalchemy/ext/asyncio/scoping.py index 535c30d6154..46c8f0baa77 100644 --- a/lib/sqlalchemy/ext/asyncio/scoping.py +++ b/lib/sqlalchemy/ext/asyncio/scoping.py @@ -36,6 +36,7 @@ "get", "get_bind", "is_modified", + "invalidate", "merge", "refresh", "rollback", diff --git a/lib/sqlalchemy/ext/asyncio/session.py b/lib/sqlalchemy/ext/asyncio/session.py index 67db7110610..b685218d960 100644 --- a/lib/sqlalchemy/ext/asyncio/session.py +++ b/lib/sqlalchemy/ext/asyncio/session.py @@ -605,6 +605,13 @@ async def close(self): """ return await greenlet_spawn(self.sync_session.close) + async def invalidate(self): + """Close this Session, using connection invalidation. + + For a complete description, see :meth:`_orm.Session.invalidate`. 
+ """ + return await greenlet_spawn(self.sync_session.invalidate) + @classmethod async def close_all(self): """Close all :class:`_asyncio.AsyncSession` sessions.""" diff --git a/test/ext/asyncio/test_session_py3k.py b/test/ext/asyncio/test_session_py3k.py index 4e475b2122e..bcaea05e53f 100644 --- a/test/ext/asyncio/test_session_py3k.py +++ b/test/ext/asyncio/test_session_py3k.py @@ -22,6 +22,7 @@ from sqlalchemy.testing import is_ from sqlalchemy.testing import is_true from sqlalchemy.testing import mock +from sqlalchemy.testing.assertions import is_false from .test_engine_py3k import AsyncFixture as _AsyncFixture from ...orm import _fixtures @@ -488,6 +489,22 @@ def end_savepoint(session, transaction): result = await async_session.execute(select(User)) eq_(result.all(), []) + @async_test + @testing.requires.independent_connections + async def test_invalidate(self, async_session): + await async_session.execute(select(1)) + conn = async_session.sync_session.connection() + fairy = conn.connection + connection_rec = fairy._connection_record + + is_false(conn.closed) + is_false(connection_rec._is_hard_or_soft_invalidated()) + await async_session.invalidate() + is_true(conn.closed) + is_true(connection_rec._is_hard_or_soft_invalidated()) + + eq_(async_session.in_transaction(), False) + class AsyncCascadesTest(AsyncFixture): run_inserts = None From 38bbd4686180d01c8fa0538f2f72b01e81380166 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 19 Jan 2022 18:11:01 -0500 Subject: [PATCH 093/632] - 1.4.30 --- doc/build/changelog/changelog_14.rst | 187 ++++++++++++++++++++- doc/build/changelog/unreleased_14/7032.rst | 10 -- doc/build/changelog/unreleased_14/7347.rst | 6 - doc/build/changelog/unreleased_14/7382.rst | 9 - doc/build/changelog/unreleased_14/7418.rst | 5 - doc/build/changelog/unreleased_14/7505.rst | 14 -- doc/build/changelog/unreleased_14/7507.rst | 15 -- doc/build/changelog/unreleased_14/7514.rst | 9 - doc/build/changelog/unreleased_14/7518.rst | 8 - 
doc/build/changelog/unreleased_14/7524.rst | 7 - doc/build/changelog/unreleased_14/7537.rst | 17 -- doc/build/changelog/unreleased_14/7557.rst | 9 - doc/build/changelog/unreleased_14/7561.rst | 8 - doc/build/changelog/unreleased_14/7567.rst | 7 - doc/build/changelog/unreleased_14/7576.rst | 18 -- doc/build/changelog/unreleased_14/7579.rst | 9 - doc/build/changelog/unreleased_14/7580.rst | 18 -- doc/build/conf.py | 4 +- 18 files changed, 188 insertions(+), 172 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/7032.rst delete mode 100644 doc/build/changelog/unreleased_14/7347.rst delete mode 100644 doc/build/changelog/unreleased_14/7382.rst delete mode 100644 doc/build/changelog/unreleased_14/7418.rst delete mode 100644 doc/build/changelog/unreleased_14/7505.rst delete mode 100644 doc/build/changelog/unreleased_14/7507.rst delete mode 100644 doc/build/changelog/unreleased_14/7514.rst delete mode 100644 doc/build/changelog/unreleased_14/7518.rst delete mode 100644 doc/build/changelog/unreleased_14/7524.rst delete mode 100644 doc/build/changelog/unreleased_14/7537.rst delete mode 100644 doc/build/changelog/unreleased_14/7557.rst delete mode 100644 doc/build/changelog/unreleased_14/7561.rst delete mode 100644 doc/build/changelog/unreleased_14/7567.rst delete mode 100644 doc/build/changelog/unreleased_14/7576.rst delete mode 100644 doc/build/changelog/unreleased_14/7579.rst delete mode 100644 doc/build/changelog/unreleased_14/7580.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 11d57f87b3a..54422ead3ed 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,192 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.30 - :include_notes_from: unreleased_14 + :released: January 19, 2022 + + .. 
change:: + :tags: usecase, asyncio + :tickets: 7580 + + Added new method :meth:`.AdaptedConnection.run_async` to the DBAPI + connection interface used by asyncio drivers, which allows methods to be + called against the underlying "driver" connection directly within a + sync-style function where the ``await`` keyword can't be used, such as + within SQLAlchemy event handler functions. The method is analogous to the + :meth:`_asyncio.AsyncConnection.run_sync` method which translates + async-style calls to sync-style. The method is useful for things like + connection-pool on-connect handlers that need to invoke awaitable methods + on the driver connection when it's first created. + + .. seealso:: + + :ref:`asyncio_events_run_async` + + + .. change:: + :tags: bug, orm + :tickets: 7507 + + Fixed issue in joined-inheritance load of additional attributes + functionality in deep multi-level inheritance where an intermediary table + that contained no columns would not be included in the tables joined, + instead linking those tables to their primary key identifiers. While this + works fine, it nonetheless in 1.4 began producing the cartesian product + compiler warning. The logic has been changed so that these intermediary + tables are included regardless. While this does include additional tables + in the query that are not technically necessary, this only occurs for the + highly unusual case of deep 3+ level inheritance with intermediary tables + that have no non primary key columns, potential performance impact is + therefore expected to be negligible. + + .. change:: + :tags: bug, orm + :tickets: 7579 + + Fixed issue where calling upon :meth:`_orm.registry.map_imperatively` more + than once for the same class would produce an unexpected error, rather than + an informative error that the target class is already mapped. This behavior + differed from that of the :func:`_orm.mapper` function which does report an + informative message already. + + .. 
change:: + :tags: bug, sql, postgresql + :tickets: 7537 + + Added additional rule to the system that determines ``TypeEngine`` + implementations from Python literals to apply a second level of adjustment + to the type, so that a Python datetime with or without tzinfo can set the + ``timezone=True`` parameter on the returned :class:`.DateTime` object, as + well as :class:`.Time`. This helps with some round-trip scenarios on + type-sensitive PostgreSQL dialects such as asyncpg, psycopg3 (2.0 only). + + .. change:: + :tags: bug, postgresql, asyncpg + :tickets: 7537 + + Improved support for asyncpg handling of TIME WITH TIMEZONE, which + was not fully implemented. + + .. change:: + :tags: usecase, postgresql + :tickets: 7561 + + Added string rendering to the :class:`.postgresql.UUID` datatype, so that + stringifying a statement with "literal_binds" that uses this type will + render an appropriate string value for the PostgreSQL backend. Pull request + courtesy José Duarte. + + .. change:: + :tags: bug, orm, asyncio + :tickets: 7524 + + Added missing method :meth:`_asyncio.AsyncSession.invalidate` to the + :class:`_asyncio.AsyncSession` class. + + + .. change:: + :tags: bug, orm, regression + :tickets: 7557 + + Fixed regression which appeared in 1.4.23 which could cause loader options + to be mis-handled in some cases, in particular when using joined table + inheritance in combination with the ``polymorphic_load="selectin"`` option + as well as relationship lazy loading, leading to a ``TypeError``. + + + .. change:: + :tags: bug, mypy + :tickets: 7321 + + Fixed Mypy crash when running id daemon mode caused by a + missing attribute on an internal mypy ``Var`` instance. + + .. change:: + :tags: change, mysql + :tickets: 7518 + + Replace ``SHOW VARIABLES LIKE`` statement with equivalent + ``SELECT @@variable`` in MySQL and MariaDB dialect initialization. + This should avoid mutex contention caused by ``SHOW VARIABLES``, + improving initialization performance. + + .. 
change:: + :tags: bug, orm, regression + :tickets: 7576 + + Fixed ORM regression where calling the :func:`_orm.aliased` function + against an existing :func:`_orm.aliased` construct would fail to produce + correct SQL if the existing construct were against a fixed table. The fix + allows that the original :func:`_orm.aliased` construct is disregarded if + it were only against a table that's now being replaced. It also allows for + correct behavior when constructing a :func:`_orm.aliased` without a + selectable argument against a :func:`_orm.aliased` that's against a + subuquery, to create an alias of that subquery (i.e. to change its name). + + The nesting behavior of :func:`_orm.aliased` remains in place for the case + where the outer :func:`_orm.aliased` object is against a subquery which in + turn refers to the inner :func:`_orm.aliased` object. This is a relatively + new 1.4 feature that helps to suit use cases that were previously served by + the deprecated ``Query.from_self()`` method. + + .. change:: + :tags: bug, orm + :tickets: 7514 + + Fixed issue where :meth:`_sql.Select.correlate_except` method, when passed + either the ``None`` value or no arguments, would not correlate any elements + when used in an ORM context (that is, passing ORM entities as FROM + clauses), rather than causing all FROM elements to be considered as + "correlated" in the same way which occurs when using Core-only constructs. + + .. change:: + :tags: bug, orm, regression + :tickets: 7505 + + Fixed regression from 1.3 where the "subqueryload" loader strategy would + fail with a stack trace if used against a query that made use of + :meth:`_orm.Query.from_statement` or :meth:`_sql.Select.from_statement`. As + subqueryload requires modifying the original statement, it's not compatible + with the "from_statement" use case, especially for statements made against + the :func:`_sql.text` construct. 
The behavior now is equivalent to that of + 1.3 and previously, which is that the loader strategy silently degrades to + not be used for such statements, typically falling back to using the + lazyload strategy. + + + .. change:: + :tags: bug, reflection, postgresql, mssql + :tickets: 7382 + + Fixed reflection of covering indexes to report ``include_columns`` as part + of the ``dialect_options`` entry in the reflected index dictionary, thereby + enabling round trips from reflection->create to be complete. Included + columns continue to also be present under the ``include_columns`` key for + backwards compatibility. + + .. change:: + :tags: bug, mysql + :tickets: 7567 + + Removed unnecessary dependency on PyMySQL from the asyncmy dialect. Pull + request courtesy long2ice. + + + .. change:: + :tags: bug, postgresql + :tickets: 7418 + + Fixed handling of array of enum values which require escape characters. + + .. change:: + :tags: bug, sql + :tickets: 7032 + + Added an informative error message when a method object is passed to a SQL + construct. Previously, when such a callable were passed, as is a common + typographical error when dealing with method-chained SQL constructs, they + were interpreted as "lambda SQL" targets to be invoked at compilation time, + which would lead to silent failures. As this feature was not intended to be + used with methods, method objects are now rejected. .. changelog:: :version: 1.4.29 diff --git a/doc/build/changelog/unreleased_14/7032.rst b/doc/build/changelog/unreleased_14/7032.rst deleted file mode 100644 index c837be49446..00000000000 --- a/doc/build/changelog/unreleased_14/7032.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 7032 - - Added an informative error message when a method object is passed to a SQL - construct. 
Previously, when such a callable were passed, as is a common - typographical error when dealing with method-chained SQL constructs, they - were interpreted as "lambda SQL" targets to be invoked at compilation time, - which would lead to silent failures. As this feature was not intended to be - used with methods, method objects are now rejected. diff --git a/doc/build/changelog/unreleased_14/7347.rst b/doc/build/changelog/unreleased_14/7347.rst deleted file mode 100644 index f259112fd90..00000000000 --- a/doc/build/changelog/unreleased_14/7347.rst +++ /dev/null @@ -1,6 +0,0 @@ -.. change:: - :tags: bug, mypy - :tickets: 7321 - - Fixed Mypy crash when running id daemon mode caused by a - missing attribute on an internal mypy ``Var`` instance. diff --git a/doc/build/changelog/unreleased_14/7382.rst b/doc/build/changelog/unreleased_14/7382.rst deleted file mode 100644 index db6ae453114..00000000000 --- a/doc/build/changelog/unreleased_14/7382.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, reflection, postgresql, mssql - :tickets: 7382 - - Fixed reflection of covering indexes to report ``include_columns`` as part - of the ``dialect_options`` entry in the reflected index dictionary, thereby - enabling round trips from reflection->create to be complete. Included - columns continue to also be present under the ``include_columns`` key for - backwards compatibility. diff --git a/doc/build/changelog/unreleased_14/7418.rst b/doc/build/changelog/unreleased_14/7418.rst deleted file mode 100644 index e1e192571d7..00000000000 --- a/doc/build/changelog/unreleased_14/7418.rst +++ /dev/null @@ -1,5 +0,0 @@ -.. change:: - :tags: bug, postgresql - :tickets: 7418 - - Fixed handling of array of enum values which require escape characters. 
diff --git a/doc/build/changelog/unreleased_14/7505.rst b/doc/build/changelog/unreleased_14/7505.rst deleted file mode 100644 index b017c0ae138..00000000000 --- a/doc/build/changelog/unreleased_14/7505.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. change:: - :tags: bug, orm, regression - :tickets: 7505 - - Fixed regression from 1.3 where the "subqueryload" loader strategy would - fail with a stack trace if used against a query that made use of - :meth:`_orm.Query.from_statement` or :meth:`_sql.Select.from_statement`. As - subqueryload requires modifying the original statement, it's not compatible - with the "from_statement" use case, especially for statements made against - the :func:`_sql.text` construct. The behavior now is equivalent to that of - 1.3 and previously, which is that the loader strategy silently degrades to - not be used for such statements, typically falling back to using the - lazyload strategy. - diff --git a/doc/build/changelog/unreleased_14/7507.rst b/doc/build/changelog/unreleased_14/7507.rst deleted file mode 100644 index 7412c7f0ce9..00000000000 --- a/doc/build/changelog/unreleased_14/7507.rst +++ /dev/null @@ -1,15 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 7507 - - Fixed issue in joined-inheritance load of additional attributes - functionality in deep multi-level inheritance where an intermediary table - that contained no columns would not be included in the tables joined, - instead linking those tables to their primary key identifiers. While this - works fine, it nonetheless in 1.4 began producing the cartesian product - compiler warning. The logic has been changed so that these intermediary - tables are included regardless. While this does include additional tables - in the query that are not technically necessary, this only occurs for the - highly unusual case of deep 3+ level inheritance with intermediary tables - that have no non primary key columns, potential performance impact is - therefore expected to be negligible. 
diff --git a/doc/build/changelog/unreleased_14/7514.rst b/doc/build/changelog/unreleased_14/7514.rst deleted file mode 100644 index bf6fd471eed..00000000000 --- a/doc/build/changelog/unreleased_14/7514.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 7514 - - Fixed issue where :meth:`_sql.Select.correlate_except` method, when passed - either the ``None`` value or no arguments, would not correlate any elements - when used in an ORM context (that is, passing ORM entities as FROM - clauses), rather than causing all FROM elements to be considered as - "correlated" in the same way which occurs when using Core-only constructs. diff --git a/doc/build/changelog/unreleased_14/7518.rst b/doc/build/changelog/unreleased_14/7518.rst deleted file mode 100644 index 6264297cb1f..00000000000 --- a/doc/build/changelog/unreleased_14/7518.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: change, mysql - :tickets: 7518 - - Replace ``SHOW VARIABLES LIKE`` statement with equivalent - ``SELECT @@variable`` in MySQL and MariaDB dialect initialization. - This should avoid mutex contention caused by ``SHOW VARIABLES``, - improving initialization performance. diff --git a/doc/build/changelog/unreleased_14/7524.rst b/doc/build/changelog/unreleased_14/7524.rst deleted file mode 100644 index 68ceefd67aa..00000000000 --- a/doc/build/changelog/unreleased_14/7524.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: bug, orm, asyncio - :tickets: 7524 - - Added missing method :meth:`_asyncio.AsyncSession.invalidate` to the - :class:`_asyncio.AsyncSession` class. - diff --git a/doc/build/changelog/unreleased_14/7537.rst b/doc/build/changelog/unreleased_14/7537.rst deleted file mode 100644 index d48cf30a077..00000000000 --- a/doc/build/changelog/unreleased_14/7537.rst +++ /dev/null @@ -1,17 +0,0 @@ -.. 
change:: - :tags: bug, sql, postgresql - :tickets: 7537 - - Added additional rule to the system that determines ``TypeEngine`` - implementations from Python literals to apply a second level of adjustment - to the type, so that a Python datetime with or without tzinfo can set the - ``timezone=True`` parameter on the returned :class:`.DateTime` object, as - well as :class:`.Time`. This helps with some round-trip scenarios on - type-sensitive PostgreSQL dialects such as asyncpg, psycopg3 (2.0 only). - -.. change:: - :tags: bug, postgresql, asyncpg - :tickets: 7537 - - Improved support for asyncpg handling of TIME WITH TIMEZONE, which - was not fully implemented. diff --git a/doc/build/changelog/unreleased_14/7557.rst b/doc/build/changelog/unreleased_14/7557.rst deleted file mode 100644 index b7ccc87cf27..00000000000 --- a/doc/build/changelog/unreleased_14/7557.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, orm, regression - :tickets: 7557 - - Fixed regression which appeared in 1.4.23 which could cause loader options - to be mis-handled in some cases, in particular when using joined table - inheritance in combination with the ``polymorphic_load="selectin"`` option - as well as relationship lazy loading, leading to a ``TypeError``. - diff --git a/doc/build/changelog/unreleased_14/7561.rst b/doc/build/changelog/unreleased_14/7561.rst deleted file mode 100644 index 18ea1106318..00000000000 --- a/doc/build/changelog/unreleased_14/7561.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: usecase, postgresql - :tickets: 7561 - - Added string rendering to the :class:`.postgresql.UUID` datatype, so that - stringifying a statement with "literal_binds" that uses this type will - render an appropriate string value for the PostgreSQL backend. Pull request - courtesy José Duarte. 
diff --git a/doc/build/changelog/unreleased_14/7567.rst b/doc/build/changelog/unreleased_14/7567.rst deleted file mode 100644 index 38fa6f39a14..00000000000 --- a/doc/build/changelog/unreleased_14/7567.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: bug, mysql - :tickets: 7567 - - Removed unnecessary dependency on PyMySQL from the asyncmy dialect. Pull - request courtesy long2ice. - diff --git a/doc/build/changelog/unreleased_14/7576.rst b/doc/build/changelog/unreleased_14/7576.rst deleted file mode 100644 index 74d8ac49422..00000000000 --- a/doc/build/changelog/unreleased_14/7576.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. change:: - :tags: bug, orm, regression - :tickets: 7576 - - Fixed ORM regression where calling the :func:`_orm.aliased` function - against an existing :func:`_orm.aliased` construct would fail to produce - correct SQL if the existing construct were against a fixed table. The fix - allows that the original :func:`_orm.aliased` construct is disregarded if - it were only against a table that's now being replaced. It also allows for - correct behavior when constructing a :func:`_orm.aliased` without a - selectable argument against a :func:`_orm.aliased` that's against a - subuquery, to create an alias of that subquery (i.e. to change its name). - - The nesting behavior of :func:`_orm.aliased` remains in place for the case - where the outer :func:`_orm.aliased` object is against a subquery which in - turn refers to the inner :func:`_orm.aliased` object. This is a relatively - new 1.4 feature that helps to suit use cases that were previously served by - the deprecated ``Query.from_self()`` method. diff --git a/doc/build/changelog/unreleased_14/7579.rst b/doc/build/changelog/unreleased_14/7579.rst deleted file mode 100644 index 01eea6dacf8..00000000000 --- a/doc/build/changelog/unreleased_14/7579.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. 
change:: - :tags: bug, orm - :tickets: 7579 - - Fixed issue where calling upon :meth:`_orm.registry.map_imperatively` more - than once for the same class would produce an unexpected error, rather than - an informative error that the target class is already mapped. This behavior - differed from that of the :func:`_orm.mapper` function which does report an - informative message already. diff --git a/doc/build/changelog/unreleased_14/7580.rst b/doc/build/changelog/unreleased_14/7580.rst deleted file mode 100644 index fa02085b2bc..00000000000 --- a/doc/build/changelog/unreleased_14/7580.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. change:: - :tags: usecase, asyncio - :tickets: 7580 - - Added new method :meth:`.AdaptedConnection.run_async` to the DBAPI - connection interface used by asyncio drivers, which allows methods to be - called against the underlying "driver" connection directly within a - sync-style function where the ``await`` keyword can't be used, such as - within SQLAlchemy event handler functions. The method is analogous to the - :meth:`_asyncio.AsyncConnection.run_sync` method which translates - async-style calls to sync-style. The method is useful for things like - connection-pool on-connect handlers that need to invoke awaitable methods - on the driver connection when it's first created. - - .. seealso:: - - :ref:`asyncio_events_run_async` - diff --git a/doc/build/conf.py b/doc/build/conf.py index 2db20708000..f964e14e391 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -205,9 +205,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. 
-release = "1.4.29" +release = "1.4.30" -release_date = "December 22, 2021" +release_date = "January 19, 2022" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From 541debf1e381d639bc3da78de78ab7282bfb57d3 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 19 Jan 2022 18:18:12 -0500 Subject: [PATCH 094/632] Version 1.4.31 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 54422ead3ed..32580a7e079 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.31 + :include_notes_from: unreleased_14 + .. changelog:: :version: 1.4.30 :released: January 19, 2022 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index a860e7a04ee..12056602e9c 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.30" +__version__ = "1.4.31" def __go(lcls): From a3ee2a731b4a3e4177293104e47c4cf1fc7b9a11 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 20 Jan 2022 09:31:42 -0500 Subject: [PATCH 095/632] repair mapper sort Fixed issue in :meth:`_orm.Session.bulk_save_mappings` where the sorting that takes place when the ``preserve_order`` parameter is set to False would sort partially on ``Mapper`` objects, which is rejected in Python 3.11. Also uses typing_extensions for NotRequired as this symbol does not seem to be in Python 3.11.0a4 yet. 
(2.0 only) Fixes: #7591 Change-Id: I24a62f2322ad7dac5d8e4a00853f8a9408877c9c (cherry picked from commit 8d3d934c16a91adcdc7f374c01761b18fbba74d9) --- doc/build/changelog/unreleased_14/7591.rst | 9 +++++++++ lib/sqlalchemy/orm/session.py | 20 +++++++++++++++----- 2 files changed, 24 insertions(+), 5 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7591.rst diff --git a/doc/build/changelog/unreleased_14/7591.rst b/doc/build/changelog/unreleased_14/7591.rst new file mode 100644 index 00000000000..4ecf983d11f --- /dev/null +++ b/doc/build/changelog/unreleased_14/7591.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, orm + :tickets: 7591 + + Fixed issue in :meth:`_orm.Session.bulk_save_mappings` where the sorting + that takes place when the ``preserve_order`` parameter is set to False + would sort partially on ``Mapper`` objects, which is rejected in Python + 3.11. + diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index 49e8060d082..d5a80953d6e 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -3602,14 +3602,24 @@ def bulk_save_objects( """ - def key(state): - return (state.mapper, state.key is not None) - obj_states = (attributes.instance_state(obj) for obj in objects) + if not preserve_order: - obj_states = sorted(obj_states, key=key) + # the purpose of this sort is just so that common mappers + # and persistence states are grouped together, so that groupby + # will return a single group for a particular type of mapper. + # it's not trying to be deterministic beyond that. 
+ obj_states = sorted( + obj_states, + key=lambda state: (id(state.mapper), state.key is not None), + ) - for (mapper, isupdate), states in itertools.groupby(obj_states, key): + def grouping_key(state): + return (state.mapper, state.key is not None) + + for (mapper, isupdate), states in itertools.groupby( + obj_states, grouping_key + ): self._bulk_save_mappings( mapper, states, From 9f86e4362c8af5816555d2b5991439f9c5b0ac52 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 20 Jan 2022 15:01:47 -0500 Subject: [PATCH 096/632] restore empty list logic to ARRAY of ENUM parsing Fixed regression where the change in :ticket:`7148` to repair ENUM handling in PostgreSQL broke the use case of an empty ARRAY of ENUM, preventing rows that contained an empty array from being handled correctly when fetching results. Fixes: #7590 Change-Id: I43a35ef25281a6e0a26b698efebef6ba12a63e8c (cherry picked from commit dda5c43cab88daad02bc871cf40bf4984e94a031) --- doc/build/changelog/unreleased_14/7590.rst | 8 ++++++++ lib/sqlalchemy/dialects/postgresql/array.py | 3 ++- test/dialect/postgresql/test_types.py | 18 +++++++++++++++--- 3 files changed, 25 insertions(+), 4 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7590.rst diff --git a/doc/build/changelog/unreleased_14/7590.rst b/doc/build/changelog/unreleased_14/7590.rst new file mode 100644 index 00000000000..822adf6dd05 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7590.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, postgresql, regression + :tickets: 7590 + + Fixed regression where the change in :ticket:`7148` to repair ENUM handling + in PostgreSQL broke the use case of an empty ARRAY of ENUM, preventing rows + that contained an empty array from being handled correctly when fetching + results. 
diff --git a/lib/sqlalchemy/dialects/postgresql/array.py b/lib/sqlalchemy/dialects/postgresql/array.py index 568e5b7b065..daf7c5d40d0 100644 --- a/lib/sqlalchemy/dialects/postgresql/array.py +++ b/lib/sqlalchemy/dialects/postgresql/array.py @@ -389,9 +389,10 @@ def process(value): def _split_enum_values(array_string): + if '"' not in array_string: # no escape char is present so it can just split on the comma - return array_string.split(",") + return array_string.split(",") if array_string else [] # handles quoted strings from: # r'abc,"quoted","also\\\\quoted", "quoted, comma", "esc \" quot", qpr' diff --git a/test/dialect/postgresql/test_types.py b/test/dialect/postgresql/test_types.py index e5b9d48676c..8ec345d170a 100644 --- a/test/dialect/postgresql/test_types.py +++ b/test/dialect/postgresql/test_types.py @@ -1908,7 +1908,7 @@ def test_array_plus_native_enum_create(self, metadata, connection): t.drop(connection) eq_(inspect(connection).get_enums(), []) - def _type_combinations(exclude_json=False): + def _type_combinations(exclude_json=False, exclude_empty_lists=False): def str_values(x): return ["one", "two: %s" % x, "three", "four", "five"] @@ -1942,6 +1942,9 @@ def enum_values(x): AnEnum.Foo, ] + def empty_list(x): + return [] + class inet_str(str): def __eq__(self, other): return str(self) == str(other) @@ -2075,6 +2078,15 @@ def difficult_enum_values(x): ), ] + if not exclude_empty_lists: + elements.extend( + [ + (postgresql.ENUM(AnEnum), empty_list), + (sqltypes.Enum(AnEnum, native_enum=True), empty_list), + (sqltypes.Enum(AnEnum, native_enum=False), empty_list), + (postgresql.ENUM(AnEnum, native_enum=True), empty_list), + ] + ) if not exclude_json: elements.extend( [ @@ -2163,7 +2175,7 @@ def test_type_specific_value_update( connection.scalar(select(table.c.bar).where(table.c.id == 2)), ) - @_type_combinations() + @_type_combinations(exclude_empty_lists=True) def test_type_specific_slice_update( self, type_specific_fixture, connection, type_, gen ): @@ 
-2190,7 +2202,7 @@ def test_type_specific_slice_update( eq_(rows, [(gen(1),), (sliced_gen,)]) - @_type_combinations(exclude_json=True) + @_type_combinations(exclude_json=True, exclude_empty_lists=True) def test_type_specific_value_delete( self, type_specific_fixture, connection, type_, gen ): From e527360f4b719ce7f471a83756b3008b90a886a8 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Wed, 19 Jan 2022 23:31:13 +0100 Subject: [PATCH 097/632] Added support for ``FILESTREAM`` in MSSQL. Fixes: #7243 Change-Id: I99880f429dbaac525bdf7d44438aaab6bc8d0ca6 (cherry picked from commit 64fd7a3968448f21ce1c14bff89fc78e949e04d1) --- doc/build/changelog/unreleased_14/7243.rst | 10 ++++++ doc/build/dialects/mssql.rst | 3 ++ lib/sqlalchemy/dialects/mssql/base.py | 37 ++++++++++++++++++---- test/dialect/mssql/test_types.py | 36 +++++++++++++++++++++ test/requirements.py | 14 ++++++++ 5 files changed, 94 insertions(+), 6 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7243.rst diff --git a/doc/build/changelog/unreleased_14/7243.rst b/doc/build/changelog/unreleased_14/7243.rst new file mode 100644 index 00000000000..b4661c2942c --- /dev/null +++ b/doc/build/changelog/unreleased_14/7243.rst @@ -0,0 +1,10 @@ +.. change:: + :tags: mssql + :tickets: 7243 + + Added support for ``FILESTREAM`` when using ``VARBINARY(max)`` + in MSSQL. + + .. seealso:: + + :paramref:`_mssql.VARBINARY.filestream` diff --git a/doc/build/dialects/mssql.rst b/doc/build/dialects/mssql.rst index 2bad5c9e2c2..f3060e62f6f 100644 --- a/doc/build/dialects/mssql.rst +++ b/doc/build/dialects/mssql.rst @@ -108,6 +108,9 @@ construction arguments, are as follows: :members: __init__ +.. autoclass:: VARBINARY + :members: __init__ + .. 
autoclass:: VARCHAR :members: __init__ diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index 9f15aa8e396..5d8e50213ab 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -1285,9 +1285,10 @@ class NTEXT(sqltypes.UnicodeText): class VARBINARY(sqltypes.VARBINARY, sqltypes.LargeBinary): """The MSSQL VARBINARY type. - This type is present to support "deprecate_large_types" mode where - either ``VARBINARY(max)`` or IMAGE is rendered. Otherwise, this type - object is redundant vs. :class:`_types.VARBINARY`. + This type adds additional features to the core :class:`_types.VARBINARY` + type, including "deprecate_large_types" mode where + either ``VARBINARY(max)`` or IMAGE is rendered, as well as the SQL + Server ``FILESTREAM`` option. .. versionadded:: 1.0.0 @@ -1295,12 +1296,33 @@ class VARBINARY(sqltypes.VARBINARY, sqltypes.LargeBinary): :ref:`mssql_large_type_deprecation` - - """ __visit_name__ = "VARBINARY" + def __init__(self, length=None, filestream=False): + """ + Construct a VARBINARY type. + + :param length: optional, a length for the column for use in + DDL statements, for those binary types that accept a length, + such as the MySQL BLOB type. + + :param filestream=False: if True, renders the ``FILESTREAM`` keyword + in the table definition. In this case ``length`` must be ``None`` + or ``'max'``. + + .. 
versionadded:: 1.4.31 + + """ + + self.filestream = filestream + if self.filestream and length not in (None, "max"): + raise ValueError( + "length must be None or 'max' when setting filestream" + ) + super(VARBINARY, self).__init__(length=length) + class IMAGE(sqltypes.LargeBinary): __visit_name__ = "IMAGE" @@ -1569,7 +1591,10 @@ def visit_XML(self, type_, **kw): return "XML" def visit_VARBINARY(self, type_, **kw): - return self._extend("VARBINARY", type_, length=type_.length or "max") + text = self._extend("VARBINARY", type_, length=type_.length or "max") + if getattr(type_, "filestream", False): + text += " FILESTREAM" + return text def visit_boolean(self, type_, **kw): return self.visit_BIT(type_) diff --git a/test/dialect/mssql/test_types.py b/test/dialect/mssql/test_types.py index 7e238dd8196..6004bc295b1 100644 --- a/test/dialect/mssql/test_types.py +++ b/test/dialect/mssql/test_types.py @@ -50,6 +50,7 @@ from sqlalchemy.testing import emits_warning_on from sqlalchemy.testing import engines from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import is_not @@ -531,6 +532,12 @@ def test_binary(self): (mssql.MSVarBinary, [10], {}, "VARBINARY(10)"), (types.VARBINARY, [10], {}, "VARBINARY(10)"), (types.VARBINARY, [], {}, "VARBINARY(max)"), + ( + mssql.MSVarBinary, + [], + {"filestream": True}, + "VARBINARY(max) FILESTREAM", + ), (mssql.MSImage, [], {}, "IMAGE"), (mssql.IMAGE, [], {}, "IMAGE"), (types.LargeBinary, [], {}, "IMAGE"), @@ -554,6 +561,17 @@ def test_binary(self): ) self.assert_(repr(col)) + def test_VARBINARY_init(self): + d = mssql.dialect() + t = mssql.MSVarBinary(length=None, filestream=True) + eq_(str(t.compile(dialect=d)), "VARBINARY(max) FILESTREAM") + t = mssql.MSVarBinary(length="max", filestream=True) + eq_(str(t.compile(dialect=d)), "VARBINARY(max) FILESTREAM") + with expect_raises_message( + ValueError, 
"length must be None or 'max' when setting filestream" + ): + mssql.MSVarBinary(length=1000, filestream=True) + class TypeRoundTripTest( fixtures.TestBase, AssertsExecutionResults, ComparesTables @@ -1012,6 +1030,15 @@ def test_binary_reflection(self, metadata, deprecate_large_types): ), ] + if testing.requires.mssql_filestream.enabled: + columns.append( + ( + mssql.MSVarBinary, + [], + {"filestream": True}, + "VARBINARY(max) FILESTREAM", + ) + ) engine = engines.testing_engine( options={"deprecate_large_types": deprecate_large_types} ) @@ -1254,6 +1281,15 @@ class BinaryTest(fixtures.TestBase): None, False, ), + ( + mssql.VARBINARY(filestream=True), + "binary_data_one.dat", + None, + True, + None, + False, + testing.requires.mssql_filestream, + ), ( sqltypes.LargeBinary, "binary_data_one.dat", diff --git a/test/requirements.py b/test/requirements.py index bf83b83b48b..883323f4ecc 100644 --- a/test/requirements.py +++ b/test/requirements.py @@ -1861,3 +1861,17 @@ def autoincrement_without_sequence(self): def reflect_tables_no_columns(self): # so far sqlite, mariadb, mysql don't support this return only_on(["postgresql"]) + + @property + def mssql_filestream(self): + "returns if mssql supports filestream" + + def check(config): + with config.db.connect() as conn: + res = conn.exec_driver_sql( + "SELECT [type] FROM sys.master_files WHERE " + "database_id = DB_ID() AND [type] = 2" + ).scalar() + return res is not None + + return only_on(["mssql"]) + only_if(check) From f3c1271953287a94a52075cd9ad6032c43d4bfa5 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 20 Jan 2022 15:21:17 -0500 Subject: [PATCH 098/632] re-enable tests for asyncmy; fix Binary Fixed regression in asyncmy dialect caused by :ticket:`7567` where removal of the PyMySQL dependency broke binary columns, due to the asyncmy dialect not being properly included within CI tests. 
Fixes: #7593 Change-Id: Iefc1061c24c75fcb9ca1a02d0b5e5f43970ade17 (cherry picked from commit da128e11ff5fcaafbf80704dc0aa8da0a901fb3e) --- doc/build/changelog/unreleased_14/7593.rst | 7 ++++ lib/sqlalchemy/dialects/mysql/asyncmy.py | 12 +++++++ lib/sqlalchemy/testing/suite/test_types.py | 39 ++++++++++++++++++++++ test/requirements.py | 11 +++++- tox.ini | 2 +- 5 files changed, 69 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7593.rst diff --git a/doc/build/changelog/unreleased_14/7593.rst b/doc/build/changelog/unreleased_14/7593.rst new file mode 100644 index 00000000000..ebb3406ed74 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7593.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: bug, mysql, regression + :tickets: 7593 + + Fixed regression in asyncmy dialect caused by :ticket:`7567` where removal + of the PyMySQL dependency broke binary columns, due to the asyncmy dialect + not being properly included within CI tests. diff --git a/lib/sqlalchemy/dialects/mysql/asyncmy.py b/lib/sqlalchemy/dialects/mysql/asyncmy.py index 16981fd98d9..521918a5a17 100644 --- a/lib/sqlalchemy/dialects/mysql/asyncmy.py +++ b/lib/sqlalchemy/dialects/mysql/asyncmy.py @@ -228,6 +228,11 @@ class AsyncAdaptFallback_asyncmy_connection(AsyncAdapt_asyncmy_connection): await_ = staticmethod(await_fallback) +def _Binary(x): + """Return x as a binary type.""" + return bytes(x) + + class AsyncAdapt_asyncmy_dbapi: def __init__(self, asyncmy): self.asyncmy = asyncmy @@ -250,6 +255,13 @@ def _init_dbapi_attributes(self): ): setattr(self, name, getattr(self.asyncmy.errors, name)) + STRING = util.symbol("STRING") + NUMBER = util.symbol("NUMBER") + BINARY = util.symbol("BINARY") + DATETIME = util.symbol("DATETIME") + TIMESTAMP = util.symbol("TIMESTAMP") + Binary = staticmethod(_Binary) + def connect(self, *arg, **kw): async_fallback = kw.pop("async_fallback", False) diff --git a/lib/sqlalchemy/testing/suite/test_types.py b/lib/sqlalchemy/testing/suite/test_types.py 
index 2fdea5e48e7..b96350ed077 100644 --- a/lib/sqlalchemy/testing/suite/test_types.py +++ b/lib/sqlalchemy/testing/suite/test_types.py @@ -41,6 +41,8 @@ from ... import util from ...orm import declarative_base from ...orm import Session +from ...sql.sqltypes import LargeBinary +from ...sql.sqltypes import PickleType from ...util import compat from ...util import u @@ -196,6 +198,42 @@ def test_null_strings_text(self, connection): self._test_null_strings(connection) +class BinaryTest(_LiteralRoundTripFixture, fixtures.TablesTest): + __requires__ = ("binary_literals",) + __backend__ = True + + @classmethod + def define_tables(cls, metadata): + Table( + "binary_table", + metadata, + Column( + "id", Integer, primary_key=True, test_needs_autoincrement=True + ), + Column("binary_data", LargeBinary), + Column("pickle_data", PickleType), + ) + + def test_binary_roundtrip(self, connection): + binary_table = self.tables.binary_table + + connection.execute( + binary_table.insert(), {"id": 1, "binary_data": b"this is binary"} + ) + row = connection.execute(select(binary_table.c.binary_data)).first() + eq_(row, (b"this is binary",)) + + def test_pickle_roundtrip(self, connection): + binary_table = self.tables.binary_table + + connection.execute( + binary_table.insert(), + {"id": 1, "pickle_data": {"foo": [1, 2, 3], "bar": "bat"}}, + ) + row = connection.execute(select(binary_table.c.pickle_data)).first() + eq_(row, ({"foo": [1, 2, 3], "bar": "bat"},)) + + class TextTest(_LiteralRoundTripFixture, fixtures.TablesTest): __requires__ = ("text_type",) __backend__ = True @@ -1445,6 +1483,7 @@ def test_string_cast_crit_against_string_basic(self): __all__ = ( + "BinaryTest", "UnicodeVarcharTest", "UnicodeTextTest", "JSONTest", diff --git a/test/requirements.py b/test/requirements.py index bf83b83b48b..e587211ffac 100644 --- a/test/requirements.py +++ b/test/requirements.py @@ -1136,7 +1136,16 @@ def datetime_implicit_bound(self): # pg8000 works in main / 2.0, support in 1.4 is not 
fully # present. return exclusions.skip_if("postgresql+pg8000") + exclusions.fails_on( - ["mysql", "mariadb"] + # mariadbconnector works. pyodbc we dont know, not supported in + # testing. + [ + "+mysqldb", + "+pymysql", + "+asyncmy", + "+mysqlconnector", + "+cymysql", + "+aiomysql", + ] ) @property diff --git a/tox.ini b/tox.ini index 0483ea759f2..505af98e80e 100644 --- a/tox.ini +++ b/tox.ini @@ -102,7 +102,7 @@ setenv= py2{,7}-mysql: MYSQL={env:TOX_MYSQL_PY2K:{env:TOX_MYSQL:--db mysql}} mysql: EXTRA_MYSQL_DRIVERS={env:EXTRA_MYSQL_DRIVERS:--dbdriver mysqldb --dbdriver pymysql} - py3-mysql: EXTRA_MYSQL_DRIVERS={env:EXTRA_MYSQL_DRIVERS:--dbdriver mysqldb --dbdriver pymysql --dbdriver mariadbconnector --dbdriver asyncmy} + py3{,7,8,9,10,11}-mysql: EXTRA_MYSQL_DRIVERS={env:EXTRA_MYSQL_DRIVERS:--dbdriver mysqldb --dbdriver pymysql --dbdriver mariadbconnector --dbdriver asyncmy} mssql: MSSQL={env:TOX_MSSQL:--db mssql} From 8f96f104f3883ea2a7dc718711e9d566c611a6fc Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 20 Jan 2022 20:30:24 -0500 Subject: [PATCH 099/632] typo Change-Id: Id3f1a99813be14dbfa721e6b0a6081eca4a90c05 (cherry picked from commit 7d9b811555a88dd2f1cb1520027546b87383e159) --- doc/build/changelog/unreleased_14/7591.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/changelog/unreleased_14/7591.rst b/doc/build/changelog/unreleased_14/7591.rst index 4ecf983d11f..5724a059dad 100644 --- a/doc/build/changelog/unreleased_14/7591.rst +++ b/doc/build/changelog/unreleased_14/7591.rst @@ -2,7 +2,7 @@ :tags: bug, orm :tickets: 7591 - Fixed issue in :meth:`_orm.Session.bulk_save_mappings` where the sorting + Fixed issue in :meth:`_orm.Session.bulk_save_objects` where the sorting that takes place when the ``preserve_order`` parameter is set to False would sort partially on ``Mapper`` objects, which is rejected in Python 3.11. 
From ab42b0e3d98386c8a13edea3206ef43f018de3b6 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 20 Jan 2022 20:33:00 -0500 Subject: [PATCH 100/632] - 1.4.31 --- doc/build/changelog/changelog_14.rst | 40 +++++++++++++++++++++- doc/build/changelog/unreleased_14/7243.rst | 10 ------ doc/build/changelog/unreleased_14/7590.rst | 8 ----- doc/build/changelog/unreleased_14/7591.rst | 9 ----- doc/build/changelog/unreleased_14/7593.rst | 7 ---- doc/build/conf.py | 4 +-- 6 files changed, 41 insertions(+), 37 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/7243.rst delete mode 100644 doc/build/changelog/unreleased_14/7590.rst delete mode 100644 doc/build/changelog/unreleased_14/7591.rst delete mode 100644 doc/build/changelog/unreleased_14/7593.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 32580a7e079..4c68a387e86 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,45 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.31 - :include_notes_from: unreleased_14 + :released: January 20, 2022 + + .. change:: + :tags: bug, postgresql, regression + :tickets: 7590 + + Fixed regression where the change in :ticket:`7148` to repair ENUM handling + in PostgreSQL broke the use case of an empty ARRAY of ENUM, preventing rows + that contained an empty array from being handled correctly when fetching + results. + + .. change:: + :tags: bug, orm + :tickets: 7591 + + Fixed issue in :meth:`_orm.Session.bulk_save_objects` where the sorting + that takes place when the ``preserve_order`` parameter is set to False + would sort partially on ``Mapper`` objects, which is rejected in Python + 3.11. + + + .. 
change:: + :tags: bug, mysql, regression + :tickets: 7593 + + Fixed regression in asyncmy dialect caused by :ticket:`7567` where removal + of the PyMySQL dependency broke binary columns, due to the asyncmy dialect + not being properly included within CI tests. + + .. change:: + :tags: mssql + :tickets: 7243 + + Added support for ``FILESTREAM`` when using ``VARBINARY(max)`` + in MSSQL. + + .. seealso:: + + :paramref:`_mssql.VARBINARY.filestream` .. changelog:: :version: 1.4.30 diff --git a/doc/build/changelog/unreleased_14/7243.rst b/doc/build/changelog/unreleased_14/7243.rst deleted file mode 100644 index b4661c2942c..00000000000 --- a/doc/build/changelog/unreleased_14/7243.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. change:: - :tags: mssql - :tickets: 7243 - - Added support for ``FILESTREAM`` when using ``VARBINARY(max)`` - in MSSQL. - - .. seealso:: - - :paramref:`_mssql.VARBINARY.filestream` diff --git a/doc/build/changelog/unreleased_14/7590.rst b/doc/build/changelog/unreleased_14/7590.rst deleted file mode 100644 index 822adf6dd05..00000000000 --- a/doc/build/changelog/unreleased_14/7590.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, postgresql, regression - :tickets: 7590 - - Fixed regression where the change in :ticket:`7148` to repair ENUM handling - in PostgreSQL broke the use case of an empty ARRAY of ENUM, preventing rows - that contained an empty array from being handled correctly when fetching - results. diff --git a/doc/build/changelog/unreleased_14/7591.rst b/doc/build/changelog/unreleased_14/7591.rst deleted file mode 100644 index 5724a059dad..00000000000 --- a/doc/build/changelog/unreleased_14/7591.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 7591 - - Fixed issue in :meth:`_orm.Session.bulk_save_objects` where the sorting - that takes place when the ``preserve_order`` parameter is set to False - would sort partially on ``Mapper`` objects, which is rejected in Python - 3.11. 
- diff --git a/doc/build/changelog/unreleased_14/7593.rst b/doc/build/changelog/unreleased_14/7593.rst deleted file mode 100644 index ebb3406ed74..00000000000 --- a/doc/build/changelog/unreleased_14/7593.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: bug, mysql, regression - :tickets: 7593 - - Fixed regression in asyncmy dialect caused by :ticket:`7567` where removal - of the PyMySQL dependency broke binary columns, due to the asyncmy dialect - not being properly included within CI tests. diff --git a/doc/build/conf.py b/doc/build/conf.py index f964e14e391..67a629799b3 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -205,9 +205,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. -release = "1.4.30" +release = "1.4.31" -release_date = "January 19, 2022" +release_date = "January 20, 2022" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From 328041d21d9f96db12caac79bbf7251a0dbd01e1 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Wed, 19 Jan 2022 23:41:36 +0100 Subject: [PATCH 101/632] Remove dispose warning on async engines when running tests Co-authored-by: Mike Bayer Change-Id: Ia3357959ed286dc7d2ce264b5ddcadf309351ff7 (cherry picked from commit 9b2cd1ede5951fff7180d64bb39aa3a601ec1900) --- lib/sqlalchemy/testing/engines.py | 7 +- test/ext/asyncio/test_engine_py3k.py | 135 +++++++++++++-------------- 2 files changed, 70 insertions(+), 72 deletions(-) diff --git a/lib/sqlalchemy/testing/engines.py b/lib/sqlalchemy/testing/engines.py index 2fb81a5bfa7..97809f4b9c2 100644 --- a/lib/sqlalchemy/testing/engines.py +++ b/lib/sqlalchemy/testing/engines.py @@ -17,6 +17,7 @@ from .util import gc_collect from .. import event from .. 
import pool +from ..util import await_only class ConnectionKiller(object): @@ -96,7 +97,10 @@ def _drop_testing_engines(self, scope): and proxy_ref._pool is rec.pool ): self._safe(proxy_ref._checkin) - rec.dispose() + if hasattr(rec, "sync_engine"): + await_only(rec.dispose()) + else: + rec.dispose() eng.clear() def after_test(self): @@ -310,6 +314,7 @@ def testing_engine( from sqlalchemy.pool import StaticPool if config.db is not None and isinstance(config.db.pool, StaticPool): + use_reaper = False engine.pool._transfer_from(config.db.pool) if scope == "global": diff --git a/test/ext/asyncio/test_engine_py3k.py b/test/ext/asyncio/test_engine_py3k.py index e88ef5464e8..b302c96cbdb 100644 --- a/test/ext/asyncio/test_engine_py3k.py +++ b/test/ext/asyncio/test_engine_py3k.py @@ -175,6 +175,11 @@ class EngineFixture(AsyncFixture, fixtures.TablesTest): def async_engine(self): return engines.testing_engine(asyncio=True, transfer_staticpool=True) + @testing.fixture + def async_connection(self, async_engine): + with async_engine.sync_engine.connect() as conn: + yield AsyncConnection(async_engine, conn) + @classmethod def define_tables(cls, metadata): Table( @@ -357,56 +362,53 @@ def test_execution_options(self, async_engine): @async_test async def test_proxied_attrs_connection(self, async_engine): - conn = await async_engine.connect() - - sync_conn = conn.sync_connection + async with async_engine.connect() as conn: + sync_conn = conn.sync_connection - is_(conn.engine, async_engine) - is_(conn.closed, sync_conn.closed) - is_(conn.dialect, async_engine.sync_engine.dialect) - eq_(conn.default_isolation_level, sync_conn.default_isolation_level) + is_(conn.engine, async_engine) + is_(conn.closed, sync_conn.closed) + is_(conn.dialect, async_engine.sync_engine.dialect) + eq_( + conn.default_isolation_level, sync_conn.default_isolation_level + ) @async_test - async def test_transaction_accessor(self, async_engine): - async with async_engine.connect() as conn: - 
is_none(conn.get_transaction()) - is_false(conn.in_transaction()) - is_false(conn.in_nested_transaction()) + async def test_transaction_accessor(self, async_connection): + conn = async_connection + is_none(conn.get_transaction()) + is_false(conn.in_transaction()) + is_false(conn.in_nested_transaction()) - trans = await conn.begin() + trans = await conn.begin() - is_true(conn.in_transaction()) - is_false(conn.in_nested_transaction()) + is_true(conn.in_transaction()) + is_false(conn.in_nested_transaction()) - is_( - trans.sync_transaction, conn.get_transaction().sync_transaction - ) + is_(trans.sync_transaction, conn.get_transaction().sync_transaction) - nested = await conn.begin_nested() + nested = await conn.begin_nested() - is_true(conn.in_transaction()) - is_true(conn.in_nested_transaction()) + is_true(conn.in_transaction()) + is_true(conn.in_nested_transaction()) - is_( - conn.get_nested_transaction().sync_transaction, - nested.sync_transaction, - ) - eq_(conn.get_nested_transaction(), nested) + is_( + conn.get_nested_transaction().sync_transaction, + nested.sync_transaction, + ) + eq_(conn.get_nested_transaction(), nested) - is_( - trans.sync_transaction, conn.get_transaction().sync_transaction - ) + is_(trans.sync_transaction, conn.get_transaction().sync_transaction) - await nested.commit() + await nested.commit() - is_true(conn.in_transaction()) - is_false(conn.in_nested_transaction()) + is_true(conn.in_transaction()) + is_false(conn.in_nested_transaction()) - await trans.rollback() + await trans.rollback() - is_none(conn.get_transaction()) - is_false(conn.in_transaction()) - is_false(conn.in_nested_transaction()) + is_none(conn.get_transaction()) + is_false(conn.in_transaction()) + is_false(conn.in_nested_transaction()) @testing.requires.queue_pool @async_test @@ -429,31 +431,26 @@ async def test_invalidate(self, async_engine): is_not(new_fairy, connection_fairy) is_(new_fairy.is_valid, True) is_(connection_fairy.is_valid, False) + await conn.close() 
@async_test - async def test_get_dbapi_connection_raise(self, async_engine): - - conn = await async_engine.connect() - + async def test_get_dbapi_connection_raise(self, async_connection): with testing.expect_raises_message( exc.InvalidRequestError, "AsyncConnection.connection accessor is not " "implemented as the attribute", ): - conn.connection + async_connection.connection @async_test - async def test_get_raw_connection(self, async_engine): + async def test_get_raw_connection(self, async_connection): - conn = await async_engine.connect() - - pooled = await conn.get_raw_connection() - is_(pooled, conn.sync_connection.connection) + pooled = await async_connection.get_raw_connection() + is_(pooled, async_connection.sync_connection.connection) @async_test - async def test_isolation_level(self, async_engine): - conn = await async_engine.connect() - + async def test_isolation_level(self, async_connection): + conn = async_connection sync_isolation_level = await greenlet_spawn( conn.sync_connection.get_isolation_level ) @@ -466,8 +463,6 @@ async def test_isolation_level(self, async_engine): eq_(isolation_level, "SERIALIZABLE") - await conn.close() - @testing.requires.queue_pool @async_test async def test_dispose(self, async_engine): @@ -490,9 +485,8 @@ async def test_dispose(self, async_engine): @testing.requires.independent_connections @async_test async def test_init_once_concurrency(self, async_engine): - c1 = async_engine.connect() - c2 = async_engine.connect() - await asyncio.wait([c1, c2]) + async with async_engine.connect() as c1, async_engine.connect() as c2: + await asyncio.wait([c1, c2]) @async_test async def test_connect_ctxmanager(self, async_engine): @@ -648,16 +642,15 @@ async def test_no_async_listeners(self, async_engine): ): event.listen(async_engine, "before_cursor_execute", mock.Mock()) - conn = await async_engine.connect() - - with testing.expect_raises_message( - NotImplementedError, - "asynchronous events are not implemented " - "at this time. 
Apply synchronous listeners to the " - "AsyncEngine.sync_engine or " - "AsyncConnection.sync_connection attributes.", - ): - event.listen(conn, "before_cursor_execute", mock.Mock()) + async with async_engine.connect() as conn: + with testing.expect_raises_message( + NotImplementedError, + "asynchronous events are not implemented " + "at this time. Apply synchronous listeners to the " + "AsyncEngine.sync_engine or " + "AsyncConnection.sync_connection attributes.", + ): + event.listen(conn, "before_cursor_execute", mock.Mock()) @async_test async def test_sync_before_cursor_execute_engine(self, async_engine): @@ -1071,16 +1064,16 @@ async def test_gc_conn(self, testing_engine): def test_regen_conn_but_not_engine(self, async_engine): - sync_conn = async_engine.sync_engine.connect() + with async_engine.sync_engine.connect() as sync_conn: - async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn) - async_conn2 = AsyncConnection._retrieve_proxy_for_target(sync_conn) + async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn) + async_conn2 = AsyncConnection._retrieve_proxy_for_target(sync_conn) - is_(async_conn, async_conn2) - is_(async_conn.engine, async_engine) + is_(async_conn, async_conn2) + is_(async_conn.engine, async_engine) - def test_regen_trans_but_not_conn(self, async_engine): - sync_conn = async_engine.sync_engine.connect() + def test_regen_trans_but_not_conn(self, connection_no_trans): + sync_conn = connection_no_trans async_conn = AsyncConnection._retrieve_proxy_for_target(sync_conn) From c98c0ec73cd22f43fabc6294a4de6c6b476708a2 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 21 Jan 2022 10:19:02 -0500 Subject: [PATCH 102/632] Skip PK returned as None for RETURNING, server side default Fixed regression where the ORM exception that is to be raised when an INSERT silently fails to actually insert a row (such as from a trigger) would not be reached, due to a runtime exception raised ahead of time due to the missing primary key value, thus 
raising an uninformative exception rather than the correct one. For 1.4 and above, a new ``FlushError`` is added for this case that's raised earlier than the previous "null identity" exception was for 1.3, as a situation where the number of rows actually INSERTed does not match what was expected is a more critical situation in 1.4 as it prevents batching of multiple objects from working correctly. This is separate from the case where a newly fetched primary key is fetched as NULL, which continues to raise the existing "null identity" exception. Fixes: #7594 Change-Id: Ie8e181e3472f09f389cca757c5e58e61b15c7d79 (cherry picked from commit 8a1931601d3b105ad585ef39840c8251ebdb44a2) --- doc/build/changelog/unreleased_14/7594.rst | 16 ++++ lib/sqlalchemy/orm/persistence.py | 25 ++++++ test/orm/test_unitofwork.py | 99 ++++++++++++++++++++++ 3 files changed, 140 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7594.rst diff --git a/doc/build/changelog/unreleased_14/7594.rst b/doc/build/changelog/unreleased_14/7594.rst new file mode 100644 index 00000000000..427bac97e31 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7594.rst @@ -0,0 +1,16 @@ +.. change:: + :tags: bug, orm, regression + :tickets: 7594 + + Fixed regression where the ORM exception that is to be raised when an + INSERT silently fails to actually insert a row (such as from a trigger) + would not be reached, due to a runtime exception raised ahead of time due + to the missing primary key value, thus raising an uninformative exception + rather than the correct one. For 1.4 and above, a new ``FlushError`` is + added for this case that's raised earlier than the previous "null identity" + exception was for 1.3, as a situation where the number of rows actually + INSERTed does not match what was expected is a more critical situation in + 1.4 as it prevents batching of multiple objects from working correctly. 
+ This is separate from the case where a newly fetched primary key is + fetched as NULL, which continues to raise the existing "null identity" + exception. \ No newline at end of file diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index dc7b4012e75..f7b665becad 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ b/lib/sqlalchemy/orm/persistence.py @@ -1177,6 +1177,22 @@ def _emit_insert_statements( c.inserted_primary_key_rows, c.returned_defaults_rows or (), ): + if inserted_primary_key is None: + # this is a real problem and means that we didn't + # get back as many PK rows. we can't continue + # since this indicates PK rows were missing, which + # means we likely mis-populated records starting + # at that point with incorrectly matched PK + # values. + raise orm_exc.FlushError( + "Multi-row INSERT statement for %s did not " + "produce " + "the correct number of INSERTed rows for " + "RETURNING. Ensure there are no triggers or " + "special driver issues preventing INSERT from " + "functioning properly." % mapper_rec + ) + for pk, col in zip( inserted_primary_key, mapper._pks_by_table[table], @@ -1225,6 +1241,15 @@ def _emit_insert_statements( ) primary_key = result.inserted_primary_key + if primary_key is None: + raise orm_exc.FlushError( + "Single-row INSERT statement for %s " + "did not produce a " + "new primary key result " + "being invoked. Ensure there are no triggers or " + "special driver issues preventing INSERT from " + "functioning properly." 
% (mapper_rec,) + ) for pk, col in zip( primary_key, mapper._pks_by_table[table] ): diff --git a/test/orm/test_unitofwork.py b/test/orm/test_unitofwork.py index 7b7cb0db6fb..77eaa1a8c20 100644 --- a/test/orm/test_unitofwork.py +++ b/test/orm/test_unitofwork.py @@ -25,6 +25,8 @@ from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures +from sqlalchemy.testing import is_true +from sqlalchemy.testing.assertions import expect_raises_message from sqlalchemy.testing.assertsql import AllOf from sqlalchemy.testing.assertsql import CompiledSQL from sqlalchemy.testing.assertsql import Conditional @@ -3499,6 +3501,103 @@ def test_dont_complain_if_no_update(self): s.commit() +class NoRowInsertedTest(fixtures.TestBase): + """test #7594. + + failure modes when INSERT doesnt actually insert a row. + """ + + __backend__ = True + __requires__ = ("returning",) + + @testing.fixture + @testing.skip_if( + "+asyncpg", + "1.4's asyncpg architecture doesn't let us change parameters", + ) + def null_server_default_fixture(self, registry, connection): + @registry.mapped + class MyClass(object): + __tablename__ = "my_table" + + id = Column(Integer, primary_key=True) + data = Column(String(50)) + + registry.metadata.create_all(connection) + + @event.listens_for(connection, "before_cursor_execute", retval=True) + def revert_insert( + conn, cursor, statement, parameters, context, executemany + ): + if statement.startswith("INSERT"): + if statement.endswith("RETURNING my_table.id"): + if executemany: + # remove some rows, so the count is wrong + parameters = parameters[0:1] + else: + # statement should return no rows + statement = ( + "UPDATE my_table SET id=NULL WHERE 1!=1 " + "RETURNING my_table.id" + ) + parameters = {} + else: + assert not testing.against( + "postgresql" + ), "this test has to at least run on PostgreSQL" + testing.config.skip_test( + "backend doesn't support the expected form of " + "RETURNING for this 
test to work" + ) + return statement, parameters + + return MyClass + + def test_insert_single_no_pk_correct_exception( + self, null_server_default_fixture, connection + ): + MyClass = null_server_default_fixture + + sess = fixture_session(bind=connection) + + m1 = MyClass(data="data") + sess.add(m1) + + with expect_raises_message( + orm_exc.FlushError, + "Single-row INSERT statement for .*MyClass.* did not produce", + ): + sess.flush() + + is_true(inspect(m1).transient) + sess.rollback() + is_true(inspect(m1).transient) + + def test_insert_multi_no_pk_correct_exception( + self, null_server_default_fixture, connection + ): + MyClass = null_server_default_fixture + + sess = fixture_session(bind=connection) + + m1, m2, m3 = MyClass(data="d1"), MyClass(data="d2"), MyClass(data="d3") + sess.add_all([m1, m2, m3]) + + is_multi_row = connection.dialect.insert_executemany_returning + with expect_raises_message( + orm_exc.FlushError, + "%s INSERT statement for .*MyClass.* did not produce" + % ("Multi-row" if is_multi_row else "Single-row"), + ): + sess.flush() + + for m in m1, m2, m3: + is_true(inspect(m).transient) + sess.rollback() + for m in m1, m2, m3: + is_true(inspect(m).transient) + + class EnsurePKSortableTest(fixtures.MappedTest): class SomeEnum(object): # Implements PEP 435 in the minimal fashion needed by SQLAlchemy From 4a99fc26ab5f95ef605d975b7c8d003dc28c205b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 21 Jan 2022 18:46:37 -0500 Subject: [PATCH 103/632] dont use exception catches for warnings; modernize xdist detection Improvements to the test suite's integration with pytest such that the "warnings" plugin, if manually enabled, will not interfere with the test suite, such that third parties can enable the warnings plugin or make use of the ``-W`` parameter and SQLAlchemy's test suite will continue to pass. 
Additionally, modernized the detection of the "pytest-xdist" plugin so that plugins can be globally disabled using PYTEST_DISABLE_PLUGIN_AUTOLOAD=1 without breaking the test suite if xdist were still installed. Warning filters that promote deprecation warnings to errors are now localized to SQLAlchemy-specific warnings, or within SQLAlchemy-specific sources for general Python deprecation warnings, so that non-SQLAlchemy deprecation warnings emitted from pytest plugins should also not impact the test suite. Identified a bit of cleanup for the PostgreSQL provisioning as a result. Fixes: #7599 Change-Id: Ibcf09af25228d39ee5a943fda82d8a9302433726 (cherry picked from commit a0f1914b903de6c130ab1c3267138b8ad208e144) --- doc/build/changelog/unreleased_14/7599.rst | 16 +++++ .../dialects/postgresql/provision.py | 5 +- lib/sqlalchemy/testing/__init__.py | 2 + lib/sqlalchemy/testing/assertions.py | 59 +++++++++++++++++-- lib/sqlalchemy/testing/plugin/pytestplugin.py | 19 +++--- lib/sqlalchemy/testing/warnings.py | 32 +++++----- test/dialect/oracle/test_reflection.py | 6 +- test/dialect/postgresql/test_reflection.py | 3 +- test/engine/test_pool.py | 4 +- test/engine/test_reconnect.py | 8 ++- test/orm/declarative/test_basic.py | 37 ++++++------ test/orm/declarative/test_clsregistry.py | 17 +++--- test/orm/declarative/test_mixin.py | 11 ++-- test/orm/inheritance/test_basic.py | 15 ++--- test/orm/test_attributes.py | 7 ++- test/orm/test_cascade.py | 33 ++++++----- test/orm/test_dynamic.py | 3 +- test/orm/test_eager_relations.py | 4 +- test/orm/test_events.py | 19 ++++-- test/orm/test_instrumentation.py | 4 +- test/orm/test_lazy_relations.py | 3 +- test/orm/test_mapper.py | 5 +- test/orm/test_query.py | 5 +- test/orm/test_rel_fn.py | 13 ++-- test/orm/test_relationships.py | 17 +++--- test/orm/test_scoping.py | 3 +- test/orm/test_selectin_relations.py | 4 +- test/orm/test_session.py | 13 ++-- test/orm/test_subquery_relations.py | 4 +- test/orm/test_unitofworkv2.py | 7 ++- 
test/orm/test_versioning.py | 6 +- test/sql/test_defaults.py | 3 +- test/sql/test_metadata.py | 46 ++++++++------- test/sql/test_operators.py | 10 ++-- test/sql/test_types.py | 7 ++- 35 files changed, 273 insertions(+), 177 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7599.rst diff --git a/doc/build/changelog/unreleased_14/7599.rst b/doc/build/changelog/unreleased_14/7599.rst new file mode 100644 index 00000000000..db69ace4663 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7599.rst @@ -0,0 +1,16 @@ +.. change:: + :tags: bug, tests + :tickets: 7599 + + Improvements to the test suite's integration with pytest such that the + "warnings" plugin, if manually enabled, will not interfere with the test + suite, such that third parties can enable the warnings plugin or make use + of the ``-W`` parameter and SQLAlchemy's test suite will continue to pass. + Additionally, modernized the detection of the "pytest-xdist" plugin so that + plugins can be globally disabled using PYTEST_DISABLE_PLUGIN_AUTOLOAD=1 + without breaking the test suite if xdist were still installed. Warning + filters that promote deprecation warnings to errors are now localized to + SQLAlchemy-specific warnings, or within SQLAlchemy-specific sources for + general Python deprecation warnings, so that non-SQLAlchemy deprecation + warnings emitted from pytest plugins should also not impact the test suite. 
+ diff --git a/lib/sqlalchemy/dialects/postgresql/provision.py b/lib/sqlalchemy/dialects/postgresql/provision.py index 68a01e483c2..98470f36eb9 100644 --- a/lib/sqlalchemy/dialects/postgresql/provision.py +++ b/lib/sqlalchemy/dialects/postgresql/provision.py @@ -19,10 +19,7 @@ def _pg_create_db(cfg, eng, ident): template_db = cfg.options.postgresql_templatedb with eng.execution_options(isolation_level="AUTOCOMMIT").begin() as conn: - try: - _pg_drop_db(cfg, conn, ident) - except Exception: - pass + if not template_db: template_db = conn.exec_driver_sql( "select current_database()" diff --git a/lib/sqlalchemy/testing/__init__.py b/lib/sqlalchemy/testing/__init__.py index 30babae83de..80d344faf1e 100644 --- a/lib/sqlalchemy/testing/__init__.py +++ b/lib/sqlalchemy/testing/__init__.py @@ -12,6 +12,8 @@ from .assertions import assert_raises_context_ok from .assertions import assert_raises_message from .assertions import assert_raises_message_context_ok +from .assertions import assert_warns +from .assertions import assert_warns_message from .assertions import AssertsCompiledSQL from .assertions import AssertsExecutionResults from .assertions import ComparesTables diff --git a/lib/sqlalchemy/testing/assertions.py b/lib/sqlalchemy/testing/assertions.py index 02e68802282..aa8edd9affb 100644 --- a/lib/sqlalchemy/testing/assertions.py +++ b/lib/sqlalchemy/testing/assertions.py @@ -143,14 +143,16 @@ def _expect_warnings( exc_cls, messages, regex=True, + search_msg=False, assert_=True, py2konly=False, raise_on_any_unexpected=False, + squelch_other_warnings=False, ): global _FILTERS, _SEEN, _EXC_CLS - if regex: + if regex or search_msg: filters = [re.compile(msg, re.I | re.S) for msg in messages] else: filters = list(messages) @@ -188,19 +190,23 @@ def our_warn(msg, *arg, **kw): exception = None if not exception or not issubclass(exception, _EXC_CLS): - return real_warn(msg, *arg, **kw) + if not squelch_other_warnings: + return real_warn(msg, *arg, **kw) if not filters and not 
raise_on_any_unexpected: return for filter_ in filters: - if (regex and filter_.match(msg)) or ( - not regex and filter_ == msg + if ( + (search_msg and filter_.search(msg)) + or (regex and filter_.match(msg)) + or (not regex and filter_ == msg) ): seen.discard(filter_) break else: - real_warn(msg, *arg, **kw) + if not squelch_other_warnings: + real_warn(msg, *arg, **kw) with mock.patch("warnings.warn", our_warn), mock.patch( "sqlalchemy.util.SQLALCHEMY_WARN_20", True @@ -357,6 +363,40 @@ def assert_raises_message(except_cls, msg, callable_, *args, **kwargs): ) +def assert_warns(except_cls, callable_, *args, **kwargs): + """legacy adapter function for functions that were previously using + assert_raises with SAWarning or similar. + + has some workarounds to accommodate the fact that the callable completes + with this approach rather than stopping at the exception raise. + + + """ + with _expect_warnings(except_cls, [".*"], squelch_other_warnings=True): + return callable_(*args, **kwargs) + + +def assert_warns_message(except_cls, msg, callable_, *args, **kwargs): + """legacy adapter function for functions that were previously using + assert_raises with SAWarning or similar. + + has some workarounds to accommodate the fact that the callable completes + with this approach rather than stopping at the exception raise. + + Also uses regex.search() to match the given message to the error string + rather than regex.match(). 
+ + """ + with _expect_warnings( + except_cls, + [msg], + search_msg=True, + regex=False, + squelch_other_warnings=True, + ): + return callable_(*args, **kwargs) + + def assert_raises_message_context_ok( except_cls, msg, callable_, *args, **kwargs ): @@ -378,6 +418,15 @@ class _ErrorContainer(object): @contextlib.contextmanager def _expect_raises(except_cls, msg=None, check_context=False): + if ( + isinstance(except_cls, type) + and issubclass(except_cls, Warning) + or isinstance(except_cls, Warning) + ): + raise TypeError( + "Use expect_warnings for warnings, not " + "expect_raises / assert_raises" + ) ec = _ErrorContainer() if check_context: are_we_already_in_a_traceback = sys.exc_info()[0] diff --git a/lib/sqlalchemy/testing/plugin/pytestplugin.py b/lib/sqlalchemy/testing/plugin/pytestplugin.py index 41e68307063..5a51582925d 100644 --- a/lib/sqlalchemy/testing/plugin/pytestplugin.py +++ b/lib/sqlalchemy/testing/plugin/pytestplugin.py @@ -14,16 +14,10 @@ import os import re import sys +import uuid import pytest -try: - import xdist # noqa - - has_xdist = True -except ImportError: - has_xdist = False - py2k = sys.version_info < (3, 0) if py2k: @@ -84,6 +78,9 @@ def __call__( def pytest_configure(config): + if config.pluginmanager.hasplugin("xdist"): + config.pluginmanager.register(XDistHooks()) + if hasattr(config, "workerinput"): plugin_base.restore_important_follower_config(config.workerinput) plugin_base.configure_follower(config.workerinput["follower_ident"]) @@ -157,10 +154,8 @@ def _filter(filename): collect_types.init_types_collection(filter_filename=_filter) -if has_xdist: - import uuid - - def pytest_configure_node(node): +class XDistHooks(object): + def pytest_configure_node(self, node): from sqlalchemy.testing import provision from sqlalchemy.testing import asyncio @@ -175,7 +170,7 @@ def pytest_configure_node(node): provision.create_follower_db, node.workerinput["follower_ident"] ) - def pytest_testnodedown(node, error): + def pytest_testnodedown(self, 
node, error): from sqlalchemy.testing import provision from sqlalchemy.testing import asyncio diff --git a/lib/sqlalchemy/testing/warnings.py b/lib/sqlalchemy/testing/warnings.py index db780f40030..3e783872d62 100644 --- a/lib/sqlalchemy/testing/warnings.py +++ b/lib/sqlalchemy/testing/warnings.py @@ -14,8 +14,13 @@ from ..util.langhelpers import _warnings_warn -class SATestSuiteWarning(sa_exc.SAWarning): - """warning for a condition detected during tests that is non-fatal""" +class SATestSuiteWarning(Warning): + """warning for a condition detected during tests that is non-fatal + + Currently outside of SAWarning so that we can work around tools like + Alembic doing the wrong thing with warnings. + + """ def warn_test_suite(message): @@ -25,28 +30,21 @@ def warn_test_suite(message): def setup_filters(): """Set global warning behavior for the test suite.""" + # TODO: at this point we can use the normal pytest warnings plugin, + # if we decide the test suite can be linked to pytest only + + origin = r"^(?:test|sqlalchemy)\..*" + warnings.filterwarnings( "ignore", category=sa_exc.SAPendingDeprecationWarning ) warnings.filterwarnings("error", category=sa_exc.SADeprecationWarning) warnings.filterwarnings("error", category=sa_exc.SAWarning) - warnings.filterwarnings("always", category=SATestSuiteWarning) - # some selected deprecations... 
- warnings.filterwarnings("error", category=DeprecationWarning) - warnings.filterwarnings( - "ignore", category=DeprecationWarning, message=r".*StopIteration" - ) - warnings.filterwarnings( - "ignore", - category=DeprecationWarning, - message=r".*inspect.get.*argspec", - ) + warnings.filterwarnings("always", category=SATestSuiteWarning) warnings.filterwarnings( - "ignore", - category=DeprecationWarning, - message="The loop argument is deprecated", + "error", category=DeprecationWarning, module=origin ) # ignore things that are deprecated *as of* 2.0 :) @@ -67,7 +65,7 @@ def setup_filters(): pass else: warnings.filterwarnings( - "once", category=pytest.PytestDeprecationWarning + "once", category=pytest.PytestDeprecationWarning, module=origin ) diff --git a/test/dialect/oracle/test_reflection.py b/test/dialect/oracle/test_reflection.py index acf7d75d549..b287e1024de 100644 --- a/test/dialect/oracle/test_reflection.py +++ b/test/dialect/oracle/test_reflection.py @@ -23,7 +23,7 @@ from sqlalchemy.dialects.oracle.base import BINARY_FLOAT from sqlalchemy.dialects.oracle.base import DOUBLE_PRECISION from sqlalchemy.dialects.oracle.base import NUMBER -from sqlalchemy.testing import assert_raises +from sqlalchemy.testing import assert_warns from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures @@ -352,7 +352,7 @@ def test_oracle_has_no_on_update_cascade(self, connection): "foo_id", Integer, ForeignKey("foo.id", onupdate="CASCADE") ), ) - assert_raises(exc.SAWarning, bar.create, connection) + assert_warns(exc.SAWarning, bar.create, connection) bat = Table( "bat", @@ -361,7 +361,7 @@ def test_oracle_has_no_on_update_cascade(self, connection): Column("foo_id", Integer), ForeignKeyConstraint(["foo_id"], ["foo.id"], onupdate="CASCADE"), ) - assert_raises(exc.SAWarning, bat.create, connection) + assert_warns(exc.SAWarning, bat.create, connection) def test_reflect_check_include_all(self, connection): insp = 
inspect(connection) diff --git a/test/dialect/postgresql/test_reflection.py b/test/dialect/postgresql/test_reflection.py index 5ee11ccd8c9..3502c745b26 100644 --- a/test/dialect/postgresql/test_reflection.py +++ b/test/dialect/postgresql/test_reflection.py @@ -34,6 +34,7 @@ from sqlalchemy.testing import fixtures from sqlalchemy.testing import mock from sqlalchemy.testing.assertions import assert_raises +from sqlalchemy.testing.assertions import assert_warns from sqlalchemy.testing.assertions import AssertsExecutionResults from sqlalchemy.testing.assertions import eq_ from sqlalchemy.testing.assertions import is_ @@ -486,7 +487,7 @@ def test_unknown_types(self, connection): base.PGDialect.ischema_names = {} try: m2 = MetaData() - assert_raises( + assert_warns( exc.SAWarning, Table, "testtable", m2, autoload_with=connection ) diff --git a/test/engine/test_pool.py b/test/engine/test_pool.py index 43ec9cc3ffa..320a9bb5854 100644 --- a/test/engine/test_pool.py +++ b/test/engine/test_pool.py @@ -14,7 +14,7 @@ from sqlalchemy.pool.base import _ConnDialect from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_context_ok -from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import eq_ from sqlalchemy.testing import expect_raises from sqlalchemy.testing import fixtures @@ -1821,7 +1821,7 @@ def test_no_double_checkin(self): c1 = p.connect() rec = c1._connection_record c1.close() - assert_raises_message( + assert_warns_message( Warning, "Double checkin attempted on %s" % rec, rec.checkin ) diff --git a/test/engine/test_reconnect.py b/test/engine/test_reconnect.py index 51da845b397..9579d6c2dc9 100644 --- a/test/engine/test_reconnect.py +++ b/test/engine/test_reconnect.py @@ -15,6 +15,7 @@ from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import assert_raises_message_context_ok +from 
sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import engines from sqlalchemy.testing import eq_ from sqlalchemy.testing import expect_raises @@ -967,6 +968,7 @@ def get_default_schema_name(connection): util.warn("Exception attempting to detect") eng.dialect._get_default_schema_name = get_default_schema_name + eng.dialect._check_unicode_description = mock.Mock() return eng def test_cursor_explode(self): @@ -982,11 +984,13 @@ def test_cursor_explode(self): def test_cursor_shutdown_in_initialize(self): db = self._fixture(True, True) - assert_raises_message_context_ok( + assert_warns_message( exc.SAWarning, "Exception attempting to detect", db.connect ) + # there's legacy py2k stuff happening here making this + # less smooth and probably buggy eq_( - db.pool.logger.error.mock_calls, + db.pool.logger.error.mock_calls[0:1], [call("Error closing cursor", exc_info=True)], ) diff --git a/test/orm/declarative/test_basic.py b/test/orm/declarative/test_basic.py index a405b9f2c6a..2ab787aa676 100644 --- a/test/orm/declarative/test_basic.py +++ b/test/orm/declarative/test_basic.py @@ -406,7 +406,13 @@ class Foo4(MyMixin2, Base): id = Column(Integer, primary_key=True) def test_column_named_twice(self): - def go(): + with assertions.expect_deprecated( + "A column with name 'x' is already present in table 'foo'" + ), expect_warnings( + "On class 'Foo', Column object 'x' named directly multiple times, " + "only one will be used: x, y", + ): + class Foo(Base): __tablename__ = "foo" @@ -414,15 +420,14 @@ class Foo(Base): x = Column("x", Integer) y = Column("x", Integer) - assert_raises_message( - sa.exc.SAWarning, + def test_column_repeated_under_prop(self): + with assertions.expect_deprecated( + "A column with name 'x' is already present in table 'foo'" + ), expect_warnings( "On class 'Foo', Column object 'x' named directly multiple times, " - "only one will be used: x, y", - go, - ) + "only one will be used: x, y, z", + ): - def 
test_column_repeated_under_prop(self): - def go(): class Foo(Base): __tablename__ = "foo" @@ -431,13 +436,6 @@ class Foo(Base): y = column_property(x) z = Column("x", Integer) - assert_raises_message( - sa.exc.SAWarning, - "On class 'Foo', Column object 'x' named directly multiple times, " - "only one will be used: x, y, z", - go, - ) - def test_using_explicit_prop_in_schema_objects(self): class Foo(Base): __tablename__ = "foo" @@ -2200,15 +2198,14 @@ class Test(Base): __tablename__ = "a" id = Column(Integer, primary_key=True) - assert_raises_message( - sa.exc.SAWarning, + with expect_warnings( "This declarative base already contains a class with ", - lambda: type(Base)( + ): + type(Base)( "Test", (Base,), dict(__tablename__="b", id=Column(Integer, primary_key=True)), - ), - ) + ) @testing.teardown_events(MapperEvents) @testing.teardown_events(InstrumentationEvents) diff --git a/test/orm/declarative/test_clsregistry.py b/test/orm/declarative/test_clsregistry.py index b9d41ee5325..b77a101e8e1 100644 --- a/test/orm/declarative/test_clsregistry.py +++ b/test/orm/declarative/test_clsregistry.py @@ -7,6 +7,7 @@ from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import mock +from sqlalchemy.testing.assertions import expect_warnings from sqlalchemy.testing.util import gc_collect @@ -34,16 +35,16 @@ def test_same_module_same_name(self): clsregistry.add_class("Foo", f1, base._class_registry) gc_collect() - assert_raises_message( - exc.SAWarning, + with expect_warnings( "This declarative base already contains a class with the " "same class name and module name as foo.bar.Foo, and " - "will be replaced in the string-lookup table.", - clsregistry.add_class, - "Foo", - f2, - base._class_registry, - ) + "will be replaced in the string-lookup table." 
+ ): + clsregistry.add_class( + "Foo", + f2, + base._class_registry, + ) def test_resolve(self): base = registry() diff --git a/test/orm/declarative/test_mixin.py b/test/orm/declarative/test_mixin.py index f3feb5ddf23..5a4673a23ed 100644 --- a/test/orm/declarative/test_mixin.py +++ b/test/orm/declarative/test_mixin.py @@ -1856,14 +1856,11 @@ class Mixin(object): def my_prop(cls): return Column("x", Integer) - assert_raises_message( - sa.exc.SAWarning, + with expect_warnings( "Unmanaged access of declarative attribute my_prop " - "from non-mapped class Mixin", - getattr, - Mixin, - "my_prop", - ) + "from non-mapped class Mixin" + ): + Mixin.my_prop def test_can_we_access_the_mixin_straight_special_names(self): class Mixin(object): diff --git a/test/orm/inheritance/test_basic.py b/test/orm/inheritance/test_basic.py index 446a9d9bd92..600726ce38e 100644 --- a/test/orm/inheritance/test_basic.py +++ b/test/orm/inheritance/test_basic.py @@ -35,6 +35,7 @@ from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import mock +from sqlalchemy.testing.assertions import assert_warns_message from sqlalchemy.testing.assertsql import AllOf from sqlalchemy.testing.assertsql import CompiledSQL from sqlalchemy.testing.assertsql import Conditional @@ -903,7 +904,7 @@ def test_invalid_assignment_downwards(self): c1 = C() c1.class_name = "b" sess.add(c1) - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, "Flushing object %s with incompatible " "polymorphic identity 'b'; the object may not " @@ -922,7 +923,7 @@ def test_invalid_assignment_upwards(self): b1 = B() b1.class_name = "c" sess.add(b1) - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, "Flushing object %s with incompatible " "polymorphic identity 'c'; the object may not " @@ -938,7 +939,7 @@ def test_entirely_oob_assignment(self): b1 = B() b1.class_name = "xyz" sess.add(b1) - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, "Flushing 
object %s with incompatible " "polymorphic identity 'xyz'; the object may not " @@ -968,7 +969,7 @@ def test_validate_on_upate(self): sess.expire(c1) c1.class_name = "b" - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, "Flushing object %s with incompatible " "polymorphic identity 'b'; the object may not " @@ -2143,7 +2144,7 @@ def test_explicit_composite_pk(self): properties=dict(id=[employee_table.c.eid, person_table.c.id]), primary_key=[person_table.c.id, employee_table.c.eid], ) - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, r"On mapper mapped class Employee->employees, " "primary key column 'persons.id' is being " @@ -2441,7 +2442,7 @@ def go(): Sub, subtable_two, inherits=Base ) - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, "Implicitly combining column base.base_id with " "column subtable_two.base_id under attribute 'base_id'", @@ -3093,7 +3094,7 @@ class C(P): pass self.mapper_registry.map_imperatively(P, parent) - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, "Could not assemble any primary keys for locally mapped " "table 'child' - no rows will be persisted in this Table.", diff --git a/test/orm/test_attributes.py b/test/orm/test_attributes.py index 130e9807839..70dc0a1295d 100644 --- a/test/orm/test_attributes.py +++ b/test/orm/test_attributes.py @@ -17,6 +17,7 @@ from sqlalchemy.testing import is_not from sqlalchemy.testing import is_true from sqlalchemy.testing import not_in +from sqlalchemy.testing.assertions import assert_warns from sqlalchemy.testing.mock import call from sqlalchemy.testing.mock import Mock from sqlalchemy.testing.util import all_partial_orderings @@ -3769,7 +3770,7 @@ def test_expired(self): a1.bs.append(B()) state = attributes.instance_state(a1) state._expire(state.dict, set()) - assert_raises(Warning, coll.append, B()) + assert_warns(Warning, coll.append, B()) def test_replaced(self): A, B = self.A, self.B @@ -3790,7 +3791,7 @@ def 
test_pop_existing(self): a1.bs.append(B()) state = attributes.instance_state(a1) state._reset(state.dict, "bs") - assert_raises(Warning, coll.append, B()) + assert_warns(Warning, coll.append, B()) def test_ad_hoc_lazy(self): A, B = self.A, self.B @@ -3799,4 +3800,4 @@ def test_ad_hoc_lazy(self): a1.bs.append(B()) state = attributes.instance_state(a1) _set_callable(state, state.dict, "bs", lambda: B()) - assert_raises(Warning, coll.append, B()) + assert_warns(Warning, coll.append, B()) diff --git a/test/orm/test_cascade.py b/test/orm/test_cascade.py index cd7e7c111a3..c5dd946e75e 100644 --- a/test/orm/test_cascade.py +++ b/test/orm/test_cascade.py @@ -24,6 +24,7 @@ from sqlalchemy.orm.decl_api import declarative_base from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from sqlalchemy.testing import in_ @@ -94,7 +95,7 @@ def test_delete_with_passive_deletes_all(self): def test_delete_orphan_without_delete(self): Address = self.classes.Address - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, "The 'delete-orphan' cascade option requires 'delete'.", relationship, @@ -1221,7 +1222,7 @@ def test_o2m_only_child_transient(self): sess.add(u1) assert u1 in sess assert a1 not in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.flush) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.flush) def test_o2m_only_child_persistent(self): User, Address = self.classes.User, self.classes.Address @@ -1239,7 +1240,7 @@ def test_o2m_only_child_persistent(self): sess.add(u1) assert u1 in sess assert a1 not in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.flush) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.flush) def test_o2m_backref_child_pending(self): User, Address = self.classes.User, self.classes.Address @@ -1265,7 
+1266,7 @@ def test_o2m_backref_child_transient(self): sess.add(u1) assert u1 in sess assert a1 not in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.flush) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.flush) def test_o2m_backref_child_transient_nochange(self): User, Address = self.classes.User, self.classes.Address @@ -1301,7 +1302,7 @@ def test_o2m_backref_child_expunged(self): sess.expunge(a1) assert u1 in sess assert a1 not in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.flush) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.flush) def test_o2m_backref_child_expunged_nochange(self): User, Address = self.classes.User, self.classes.Address @@ -1350,7 +1351,7 @@ def test_m2o_only_child_transient(self): sess.add(a1) assert u1 not in sess assert a1 in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.flush) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.flush) def test_m2o_only_child_expunged(self): User, Address = self.classes.User, self.classes.Address @@ -1367,7 +1368,7 @@ def test_m2o_only_child_expunged(self): sess.expunge(u1) assert u1 not in sess assert a1 in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.flush) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.flush) def test_m2o_backref_child_pending(self): User, Address = self.classes.User, self.classes.Address @@ -1393,7 +1394,7 @@ def test_m2o_backref_child_transient(self): sess.add(a1) assert u1 not in sess assert a1 in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.flush) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.flush) def test_m2o_backref_child_expunged(self): User, Address = self.classes.User, self.classes.Address @@ -1420,7 +1421,7 @@ def test_m2o_backref_child_expunged(self): sess.expunge(u1) assert u1 not in sess assert a1 in sess - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, 
"not in session", sess.flush ) @@ -1440,7 +1441,7 @@ def test_m2o_backref_future_child_expunged(self): sess.expunge(u1) assert u1 not in sess assert a1 in sess - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, "not in session", sess.flush ) @@ -1544,7 +1545,7 @@ def test_m2m_only_child_transient(self): sess.add(i1) assert i1 in sess assert k1 not in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.flush) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.flush) def test_m2m_only_child_persistent(self): Item, Keyword = self.classes.Item, self.classes.Keyword @@ -1562,7 +1563,7 @@ def test_m2m_only_child_persistent(self): sess.add(i1) assert i1 in sess assert k1 not in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.flush) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.flush) def test_m2m_backref_child_pending(self): Item, Keyword = self.classes.Item, self.classes.Keyword @@ -1588,7 +1589,7 @@ def test_m2m_backref_child_transient(self): sess.add(i1) assert i1 in sess assert k1 not in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.flush) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.flush) def test_m2m_backref_child_transient_nochange(self): Item, Keyword = self.classes.Item, self.classes.Keyword @@ -1624,7 +1625,7 @@ def test_m2m_backref_child_expunged(self): sess.expunge(k1) assert i1 in sess assert k1 not in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.flush) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.flush) def test_m2m_backref_child_expunged_nochange(self): Item, Keyword = self.classes.Item, self.classes.Keyword @@ -2808,7 +2809,7 @@ def test_o2m_commit_warns(self): a1 = Address(email_address="a1") a1.user = u1 - assert_raises_message(sa_exc.SAWarning, "not in session", sess.commit) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.commit) assert a1 not in sess @@ -2871,7 
+2872,7 @@ def test_m2o_commit_warns(self): a1.dingalings.append(d1) assert a1 not in sess - assert_raises_message(sa_exc.SAWarning, "not in session", sess.commit) + assert_warns_message(sa_exc.SAWarning, "not in session", sess.commit) class PendingOrphanTestSingleLevel(fixtures.MappedTest): diff --git a/test/orm/test_dynamic.py b/test/orm/test_dynamic.py index 8efd4523820..2a8e3e2dc40 100644 --- a/test/orm/test_dynamic.py +++ b/test/orm/test_dynamic.py @@ -15,6 +15,7 @@ from sqlalchemy.orm import relationship from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import eq_ from sqlalchemy.testing import expect_raises_message @@ -321,7 +322,7 @@ def test_no_m2o_w_uselist(self): }, ) self.mapper_registry.map_imperatively(User, users) - assert_raises_message( + assert_warns_message( exc.SAWarning, "On relationship Address.user, 'dynamic' loaders cannot be " "used with many-to-one/one-to-one relationships and/or " diff --git a/test/orm/test_eager_relations.py b/test/orm/test_eager_relations.py index d9da36073c0..879cc2b8172 100644 --- a/test/orm/test_eager_relations.py +++ b/test/orm/test_eager_relations.py @@ -31,8 +31,8 @@ from sqlalchemy.orm import undefer from sqlalchemy.sql import operators from sqlalchemy.sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL -from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns from sqlalchemy.testing import eq_ from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import fixtures @@ -2142,7 +2142,7 @@ def test_uselist_false_warning(self): ) self.mapper_registry.map_imperatively(Order, orders) s = fixture_session() - assert_raises( + assert_warns( sa.exc.SAWarning, s.query(User).options(joinedload(User.order)).all ) diff --git 
a/test/orm/test_events.py b/test/orm/test_events.py index 4dfea6a6de8..c0fbaba7d6f 100644 --- a/test/orm/test_events.py +++ b/test/orm/test_events.py @@ -32,8 +32,10 @@ from sqlalchemy.sql.traversals import NO_CACHE from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_raises from sqlalchemy.testing import expect_warnings from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_not @@ -1061,7 +1063,7 @@ def test_before_after_configured_warn_on_non_mapper(self): m1 = Mock() self.mapper_registry.map_imperatively(User, users) - assert_raises_message( + assert_warns_message( sa.exc.SAWarning, r"before_configured' and 'after_configured' ORM events only " r"invoke with the mapper\(\) function or Mapper class as " @@ -1072,7 +1074,7 @@ def test_before_after_configured_warn_on_non_mapper(self): m1, ) - assert_raises_message( + assert_warns_message( sa.exc.SAWarning, r"before_configured' and 'after_configured' ORM events only " r"invoke with the mapper\(\) function or Mapper class as " @@ -2199,7 +2201,12 @@ def test_rollback_hook(self): u2 = User(name="u1", id=1) sess.add(u2) - assert_raises(sa.exc.SAWarning, sess.commit) + + with expect_raises(sa.exc.IntegrityError), expect_warnings( + "New instance" + ): + sess.commit() + sess.rollback() eq_( canary, @@ -2251,7 +2258,11 @@ def do_something(session, previous_transaction): u2 = User(name="u1", id=1) sess.add(u2) - assert_raises(sa.exc.SAWarning, sess.commit) + with expect_raises(sa.exc.IntegrityError), expect_warnings( + "New instance" + ): + sess.commit() + sess.rollback() eq_(assertions, [True, True]) diff --git a/test/orm/test_instrumentation.py b/test/orm/test_instrumentation.py index a2d4aa9cac2..73aefe1f05a 100644 --- a/test/orm/test_instrumentation.py +++ 
b/test/orm/test_instrumentation.py @@ -10,7 +10,7 @@ from sqlalchemy.orm import instrumentation from sqlalchemy.orm import relationship from sqlalchemy.testing import assert_raises -from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from sqlalchemy.testing import ne_ @@ -525,7 +525,7 @@ class A(object): def __del__(self): pass - assert_raises_message( + assert_warns_message( sa.exc.SAWarning, r"__del__\(\) method on class " r" will cause " diff --git a/test/orm/test_lazy_relations.py b/test/orm/test_lazy_relations.py index 3ebff5f43bc..ee578ff50d1 100644 --- a/test/orm/test_lazy_relations.py +++ b/test/orm/test_lazy_relations.py @@ -25,6 +25,7 @@ from sqlalchemy.orm import Session from sqlalchemy.orm import with_parent from sqlalchemy.testing import assert_raises +from sqlalchemy.testing import assert_warns from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ @@ -369,7 +370,7 @@ def test_uselist_false_warning(self): self.mapper_registry.map_imperatively(Order, orders) s = fixture_session() u1 = s.query(User).filter(User.id == 7).one() - assert_raises(sa.exc.SAWarning, getattr, u1, "order") + assert_warns(sa.exc.SAWarning, getattr, u1, "order") def test_callable_bind(self): Address, addresses, users, User = ( diff --git a/test/orm/test_mapper.py b/test/orm/test_mapper.py index 11a762e60b1..fa837c678f7 100644 --- a/test/orm/test_mapper.py +++ b/test/orm/test_mapper.py @@ -33,6 +33,7 @@ from sqlalchemy.orm.persistence import _sort_states from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import eq_ from sqlalchemy.testing import expect_deprecated_20 @@ -832,7 +833,7 @@ def 
test_replace_rel_prop_with_rel_warns(self): ) self.mapper(Address, addresses) - assert_raises_message( + assert_warns_message( sa.exc.SAWarning, "Property User.addresses on Mapper|User|users being replaced " "with new property User.addresses; the old property will " @@ -1015,7 +1016,7 @@ class MyUser(User): polymorphic_on=users.c.name, polymorphic_identity="user", ) - assert_raises_message( + assert_warns_message( sa.exc.SAWarning, "Reassigning polymorphic association for identity 'user'", self.mapper_registry.map_imperatively, diff --git a/test/orm/test_query.py b/test/orm/test_query.py index a1dbb2f617b..6d9aee584af 100644 --- a/test/orm/test_query.py +++ b/test/orm/test_query.py @@ -72,6 +72,7 @@ from sqlalchemy.testing import mock from sqlalchemy.testing.assertions import assert_raises from sqlalchemy.testing.assertions import assert_raises_message +from sqlalchemy.testing.assertions import assert_warns_message from sqlalchemy.testing.assertions import eq_ from sqlalchemy.testing.assertions import expect_raises from sqlalchemy.testing.assertions import expect_warnings @@ -1315,7 +1316,7 @@ def test_get_fully_null_pk(self): User = self.classes.User s = fixture_session() - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, r"fully NULL primary key identity cannot load any object. " "This condition may raise an error in a future release.", @@ -1329,7 +1330,7 @@ def test_get_fully_null_composite_pk(self, outerjoin_mapping): s = fixture_session() - assert_raises_message( + assert_warns_message( sa_exc.SAWarning, r"fully NULL primary key identity cannot load any object. 
" "This condition may raise an error in a future release.", diff --git a/test/orm/test_rel_fn.py b/test/orm/test_rel_fn.py index 6f6b0d56dfe..4d8eb88b91c 100644 --- a/test/orm/test_rel_fn.py +++ b/test/orm/test_rel_fn.py @@ -17,11 +17,13 @@ from sqlalchemy.orm.interfaces import MANYTOONE from sqlalchemy.orm.interfaces import ONETOMANY from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import mock +from sqlalchemy.testing.assertions import expect_raises_message class _JoinFixtures(object): @@ -573,7 +575,7 @@ def _join_fixture_inh_selfref_w_entity(self, **kw): ) def _assert_non_simple_warning(self, fn): - assert_raises_message( + assert_warns_message( exc.SAWarning, "Non-simple column elements in " "primary join condition for property " @@ -818,9 +820,12 @@ def test_determine_local_remote_pairs_o2m_composite_selfref_func_rs(self): self._join_fixture_o2m_composite_selfref_func_remote_side() def test_determine_local_remote_pairs_o2m_overlap_func_warning(self): - self._assert_non_simple_warning( - self._join_fixture_m2o_sub_to_joined_sub_func - ) + with expect_raises_message( + exc.ArgumentError, "Could not locate any relevant" + ): + self._assert_non_simple_warning( + self._join_fixture_m2o_sub_to_joined_sub_func + ) def test_determine_local_remote_pairs_o2m_composite_selfref_func_annotated( self, diff --git a/test/orm/test_relationships.py b/test/orm/test_relationships.py index 98de9abad73..acb22ce0f8d 100644 --- a/test/orm/test_relationships.py +++ b/test/orm/test_relationships.py @@ -33,12 +33,13 @@ from sqlalchemy.orm.interfaces import ONETOMANY from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import 
AssertsCompiledSQL from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_warnings from sqlalchemy.testing import fixtures from sqlalchemy.testing import in_ from sqlalchemy.testing import is_ -from sqlalchemy.testing.assertions import expect_warnings from sqlalchemy.testing.assertsql import assert_engine from sqlalchemy.testing.assertsql import CompiledSQL from sqlalchemy.testing.fixtures import fixture_session @@ -872,7 +873,7 @@ def _test_fixture_one_run(self, **kw): @testing.provide_metadata def test_simple_warn(self): - assert_raises_message( + assert_warns_message( exc.SAWarning, r"relationship '(?:Child.parent|Parent.children)' will copy " r"column parent.id to column child.parent_id, which conflicts " @@ -963,7 +964,7 @@ def test_simple_overlaps_works(self): @testing.provide_metadata def test_double_rel_same_mapper_warns(self): - assert_raises_message( + assert_warns_message( exc.SAWarning, r"relationship 'Parent.child[12]' will copy column parent.id to " r"column child.parent_id, which conflicts with relationship\(s\): " @@ -983,7 +984,7 @@ def test_double_rel_aliased_mapper_works(self): @testing.provide_metadata def test_warn_one(self): - assert_raises_message( + assert_warns_message( exc.SAWarning, r"relationship '(?:BSub1.a|BSub2.a_member|B.a)' will copy column " r"(?:a.id|a_member.a_id) to column b.a_id", @@ -994,7 +995,7 @@ def test_warn_one(self): @testing.provide_metadata def test_warn_two(self): - assert_raises_message( + assert_warns_message( exc.SAWarning, r"relationship '(?:BSub1.a|B.a_member)' will copy column " r"(?:a.id|a_member.a_id) to column b.a_id", @@ -1005,7 +1006,7 @@ def test_warn_two(self): @testing.provide_metadata def test_warn_three(self): - assert_raises_message( + assert_warns_message( exc.SAWarning, r"relationship '(?:BSub1.a|B.a_member|B.a)' will copy column " r"(?:a.id|a_member.a_id) to column b.a_id", @@ -1017,7 +1018,7 @@ def test_warn_three(self): @testing.provide_metadata def test_warn_four(self): - 
assert_raises_message( + assert_warns_message( exc.SAWarning, r"relationship '(?:B.a|BSub2.a_member|B.a)' will copy column " r"(?:a.id|a_member.a_id) to column b.a_id", @@ -1301,7 +1302,7 @@ def test_overlapping_warning(self): }, ) - assert_raises_message( + assert_warns_message( exc.SAWarning, r"relationship .* will copy column .* to column " r"employee_t.company_id, which conflicts with relationship\(s\)", diff --git a/test/orm/test_scoping.py b/test/orm/test_scoping.py index e23f42ac53e..b2389ced308 100644 --- a/test/orm/test_scoping.py +++ b/test/orm/test_scoping.py @@ -9,6 +9,7 @@ from sqlalchemy.orm import Session from sqlalchemy.orm import sessionmaker from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ @@ -101,7 +102,7 @@ def test_config_errors(self): bind=testing.db, ) - assert_raises_message( + assert_warns_message( sa.exc.SAWarning, "At least one scoped session is already present. 
", Session.configure, diff --git a/test/orm/test_selectin_relations.py b/test/orm/test_selectin_relations.py index 7a5bb0e7edb..3e44abe88f5 100644 --- a/test/orm/test_selectin_relations.py +++ b/test/orm/test_selectin_relations.py @@ -18,8 +18,8 @@ from sqlalchemy.orm import subqueryload from sqlalchemy.orm import undefer from sqlalchemy.orm import with_polymorphic -from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ @@ -1458,7 +1458,7 @@ def test_uselist_false_warning(self): ) self.mapper_registry.map_imperatively(Order, orders) s = fixture_session() - assert_raises( + assert_warns( sa.exc.SAWarning, s.query(User).options(selectinload(User.order)).all, ) diff --git a/test/orm/test_session.py b/test/orm/test_session.py index 62974b62919..607c0a9edcd 100644 --- a/test/orm/test_session.py +++ b/test/orm/test_session.py @@ -24,6 +24,7 @@ from sqlalchemy.orm import was_deleted from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import assertions from sqlalchemy.testing import config from sqlalchemy.testing import engines @@ -1032,7 +1033,7 @@ def e(mapper, conn, target): def test_extra_dirty_state_post_flush_warning(self): s, a1, a2 = self._test_extra_dirty_state() - assert_raises_message( + assert_warns_message( sa.exc.SAWarning, "Attribute history events accumulated on 1 previously " "clean instances", @@ -2275,7 +2276,8 @@ def test_m2o_cascade_add(self): def evt(mapper, conn, instance): instance.addresses[0].user = User(name="u2") - self._test(evt, "related attribute set") + with expect_raises_message(orm_exc.FlushError, ".*Over 100"): + self._test(evt, "related attribute set") def test_m2o_cascade_remove(self): def evt(mapper, conn, instance): @@ 
-2306,7 +2308,10 @@ def test_plain_delete(self): def evt(mapper, conn, instance): object_session(instance).delete(Address(email="x1")) - self._test(evt, r"Session.delete\(\)") + with expect_raises_message( + sa.exc.InvalidRequestError, ".*is not persisted" + ): + self._test(evt, r"Session.delete\(\)") def _test(self, fn, method): User = self.classes.User @@ -2317,6 +2322,6 @@ def _test(self, fn, method): u1 = User(name="u1", addresses=[Address(name="a1")]) s.add(u1) - assert_raises_message( + assert_warns_message( sa.exc.SAWarning, "Usage of the '%s'" % method, s.commit ) diff --git a/test/orm/test_subquery_relations.py b/test/orm/test_subquery_relations.py index bf14d7212a4..7b0b4dc9dc4 100644 --- a/test/orm/test_subquery_relations.py +++ b/test/orm/test_subquery_relations.py @@ -20,8 +20,8 @@ from sqlalchemy.orm import subqueryload from sqlalchemy.orm import undefer from sqlalchemy.orm import with_polymorphic -from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ @@ -1510,7 +1510,7 @@ def test_uselist_false_warning(self): ) self.mapper_registry.map_imperatively(Order, orders) s = fixture_session() - assert_raises( + assert_warns( sa.exc.SAWarning, s.query(User).options(subqueryload(User.order)).all, ) diff --git a/test/orm/test_unitofworkv2.py b/test/orm/test_unitofworkv2.py index af38a4bab00..4546145396a 100644 --- a/test/orm/test_unitofworkv2.py +++ b/test/orm/test_unitofworkv2.py @@ -25,6 +25,7 @@ from sqlalchemy.orm import Session from sqlalchemy.orm import unitofwork from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import config from sqlalchemy.testing import engines from sqlalchemy.testing import eq_ @@ -1919,7 +1920,7 @@ def test_delete_twice(self): sess.delete(p1) - 
assert_raises_message( + assert_warns_message( exc.SAWarning, r"DELETE statement on table 'parent' expected to " r"delete 1 row\(s\); 0 were matched.", @@ -1939,7 +1940,7 @@ def test_delete_multi_missing_warning(self): sess.delete(p1) sess.delete(p2) - assert_raises_message( + assert_warns_message( exc.SAWarning, r"DELETE statement on table 'parent' expected to " r"delete 2 row\(s\); 0 were matched.", @@ -2004,7 +2005,7 @@ def test_delete_single_broken_multi_rowcount_still_warns(self): with patch.object( config.db.dialect, "supports_sane_multi_rowcount", False ): - assert_raises_message( + assert_warns_message( exc.SAWarning, r"DELETE statement on table 'parent' expected to " r"delete 1 row\(s\); 0 were matched.", diff --git a/test/orm/test_versioning.py b/test/orm/test_versioning.py index 45fad9ab741..ce01cace7f9 100644 --- a/test/orm/test_versioning.py +++ b/test/orm/test_versioning.py @@ -20,6 +20,8 @@ from sqlalchemy.orm import Session from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import config from sqlalchemy.testing import engines from sqlalchemy.testing import eq_ @@ -193,7 +195,7 @@ def test_notsane_warning(self): s1.commit() f1.value = "f1rev2" - assert_raises(sa.exc.SAWarning, s1.commit) + assert_warns(sa.exc.SAWarning, s1.commit) finally: testing.db.dialect.supports_sane_rowcount = save @@ -1328,7 +1330,7 @@ def test_mismatch_version_col_warning(self): Base, base, version_id_col=base.c.version_id ) - assert_raises_message( + assert_warns_message( exc.SAWarning, "Inheriting version_id_col 'version_id' does not " "match inherited version_id_col 'version_id' and will not " diff --git a/test/sql/test_defaults.py b/test/sql/test_defaults.py index ef924e06819..d967db6aaf9 100644 --- a/test/sql/test_defaults.py +++ b/test/sql/test_defaults.py @@ -19,6 +19,7 @@ from sqlalchemy.sql import 
select from sqlalchemy.sql import text from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns_message from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import engines from sqlalchemy.testing import eq_ @@ -1447,7 +1448,7 @@ def test_unicode_default(self): def test_nonunicode_default(self): default = b("foo") - assert_raises_message( + assert_warns_message( sa.exc.SAWarning, "Unicode column 'foobar' has non-unicode " "default value b?'foo' specified.", diff --git a/test/sql/test_metadata.py b/test/sql/test_metadata.py index bd921364795..e193c5ec7af 100644 --- a/test/sql/test_metadata.py +++ b/test/sql/test_metadata.py @@ -53,6 +53,7 @@ from sqlalchemy.testing import is_false from sqlalchemy.testing import is_true from sqlalchemy.testing import mock +from sqlalchemy.testing.assertions import expect_warnings class MetaDataTest(fixtures.TestBase, ComparesTables): @@ -1824,32 +1825,35 @@ def test_pk_cols_sets_flags(self): def test_pk_col_mismatch_one(self): m = MetaData() - assert_raises_message( - exc.SAWarning, + + with expect_warnings( "Table 't' specifies columns 'x' as primary_key=True, " - "not matching locally specified columns 'q'", - Table, - "t", - m, - Column("x", Integer, primary_key=True), - Column("q", Integer), - PrimaryKeyConstraint("q"), - ) + "not matching locally specified columns 'q'" + ): + Table( + "t", + m, + Column("x", Integer, primary_key=True), + Column("q", Integer), + PrimaryKeyConstraint("q"), + ) def test_pk_col_mismatch_two(self): m = MetaData() - assert_raises_message( - exc.SAWarning, + + with expect_warnings( "Table 't' specifies columns 'a', 'b', 'c' as primary_key=True, " - "not matching locally specified columns 'b', 'c'", - Table, - "t", - m, - Column("a", Integer, primary_key=True), - Column("b", Integer, primary_key=True), - Column("c", Integer, primary_key=True), - PrimaryKeyConstraint("b", "c"), - ) + "not matching locally specified columns 'b', 'c'" + ): + + 
Table( + "t", + m, + Column("a", Integer, primary_key=True), + Column("b", Integer, primary_key=True), + Column("c", Integer, primary_key=True), + PrimaryKeyConstraint("b", "c"), + ) @testing.emits_warning("Table 't'") def test_pk_col_mismatch_three(self): diff --git a/test/sql/test_operators.py b/test/sql/test_operators.py index c04078f7372..4eff872f4f3 100644 --- a/test/sql/test_operators.py +++ b/test/sql/test_operators.py @@ -59,6 +59,7 @@ from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import is_not +from sqlalchemy.testing.assertions import expect_deprecated from sqlalchemy.types import ARRAY from sqlalchemy.types import Boolean from sqlalchemy.types import Concatenable @@ -1220,15 +1221,14 @@ def test_empty_clauses(self, op, str_op, str_continue): # these warning classes will change to ArgumentError when the # deprecated behavior is disabled - assert_raises_message( - exc.SADeprecationWarning, + with expect_deprecated( r"Invoking %(str_op)s\(\) without arguments is deprecated, and " r"will be disallowed in a future release. For an empty " r"%(str_op)s\(\) construct, use " r"%(str_op)s\(%(str_continue)s, \*args\)\." 
- % {"str_op": str_op, "str_continue": str_continue}, - op, - ) + % {"str_op": str_op, "str_continue": str_continue} + ): + op() def test_empty_and_raw(self): self.assert_compile( diff --git a/test/sql/test_types.py b/test/sql/test_types.py index 14b1ca1051b..935c2354dd0 100644 --- a/test/sql/test_types.py +++ b/test/sql/test_types.py @@ -75,6 +75,7 @@ from sqlalchemy.sql.sqltypes import TypeEngine from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import assert_warns from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import AssertsExecutionResults from sqlalchemy.testing import engines @@ -1703,10 +1704,10 @@ def test_unicode_warnings_typelevel_native_unicode(self): dialect.supports_unicode_binds = True uni = u.dialect_impl(dialect).bind_processor(dialect) if util.py3k: - assert_raises(exc.SAWarning, uni, b"x") + assert_warns(exc.SAWarning, uni, b"x") assert isinstance(uni(unicodedata), str) else: - assert_raises(exc.SAWarning, uni, "x") + assert_warns(exc.SAWarning, uni, "x") assert isinstance(uni(unicodedata), unicode) # noqa def test_unicode_warnings_typelevel_sqla_unicode(self): @@ -1715,7 +1716,7 @@ def test_unicode_warnings_typelevel_sqla_unicode(self): dialect = default.DefaultDialect() dialect.supports_unicode_binds = False uni = u.dialect_impl(dialect).bind_processor(dialect) - assert_raises(exc.SAWarning, uni, util.b("x")) + assert_warns(exc.SAWarning, uni, util.b("x")) assert isinstance(uni(unicodedata), util.binary_type) eq_(uni(unicodedata), unicodedata.encode("utf-8")) From f315b368183a4f69ead1234ac2ae1ced8a0a6f1c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 23 Jan 2022 09:14:00 -0500 Subject: [PATCH 104/632] make the autoincrement doc a lot more clear I saw it saying "this parameter has no effect when Identity is in use", which is not accurate, so I've expanded this way out with bullets to make the current situation as clear as possible. 
Change-Id: I77001cc81cd90a6b867686e2975aed682e539347 (cherry picked from commit 8fa6a1b9cc86bf1a580bc0f30a02cd4028051cf4) --- doc/build/glossary.rst | 5 ++ lib/sqlalchemy/sql/schema.py | 149 ++++++++++++++++++++++++++--------- 2 files changed, 117 insertions(+), 37 deletions(-) diff --git a/doc/build/glossary.rst b/doc/build/glossary.rst index f979df1476d..2a1e17d3101 100644 --- a/doc/build/glossary.rst +++ b/doc/build/glossary.rst @@ -407,6 +407,11 @@ Glossary class each of which represents a particular database column or relationship to a related class. + identity key + A key associated with ORM-mapped objects that identifies their + primary key identity within the database, as well as their unique + identity within a :class:`_orm.Session` :term:`identity map`. + identity map A mapping between Python objects and their database identities. The identity map is a collection that's associated with an diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py index 943ed145251..4e8776617ee 100644 --- a/lib/sqlalchemy/sql/schema.py +++ b/lib/sqlalchemy/sql/schema.py @@ -1202,22 +1202,59 @@ def __init__(self, *args, **kwargs): equivalent keyword argument is available such as ``server_default``, ``default`` and ``unique``. - :param autoincrement: Set up "auto increment" semantics for an integer - primary key column. The default value is the string ``"auto"`` - which indicates that a single-column primary key that is of - an INTEGER type with no stated client-side or python-side defaults - should receive auto increment semantics automatically; - all other varieties of primary key columns will not. This - includes that :term:`DDL` such as PostgreSQL SERIAL or MySQL - AUTO_INCREMENT will be emitted for this column during a table - create, as well as that the column is assumed to generate new - integer primary key values when an INSERT statement invokes which - will be retrieved by the dialect. 
When used in conjunction with - :class:`.Identity` on a dialect that supports it, this parameter - has no effect. - - The flag may be set to ``True`` to indicate that a column which - is part of a composite (e.g. multi-column) primary key should + :param autoincrement: Set up "auto increment" semantics for an + **integer primary key column with no foreign key dependencies** + (see later in this docstring for a more specific definition). + This may influence the :term:`DDL` that will be emitted for + this column during a table create, as well as how the column + will be considered when INSERT statements are compiled and + executed. + + The default value is the string ``"auto"``, + which indicates that a single-column (i.e. non-composite) primary key + that is of an INTEGER type with no other client-side or server-side + default constructs indicated should receive auto increment semantics + automatically. Other values include ``True`` (force this column to + have auto-increment semantics for a :term:`composite primary key` as + well), ``False`` (this column should never have auto-increment + semantics), and the string ``"ignore_fk"`` (special-case for foreign + key columns, see below). + + The term "auto increment semantics" refers both to the kind of DDL + that will be emitted for the column within a CREATE TABLE statement, + when methods such as :meth:`.MetaData.create_all` and + :meth:`.Table.create` are invoked, as well as how the column will be + considered when an INSERT statement is compiled and emitted to the + database: + + * **DDL rendering** (i.e. :meth:`.MetaData.create_all`, + :meth:`.Table.create`): When used on a :class:`.Column` that has + no other + default-generating construct associated with it (such as a + :class:`.Sequence` or :class:`.Identity` construct), the parameter + will imply that database-specific keywords such as PostgreSQL + ``SERIAL``, MySQL ``AUTO_INCREMENT``, or ``IDENTITY`` on SQL Server + should also be rendered. 
Not every database backend has an + "implied" default generator available; for example the Oracle + backend always needs an explicit construct such as + :class:`.Identity` to be included with a :class:`.Column` in order + for the DDL rendered to include auto-generating constructs to also + be produced in the database. + + * **INSERT semantics** (i.e. when a :func:`_sql.insert` construct is + compiled into a SQL string and is then executed on a database using + :meth:`_engine.Connection.execute` or equivalent): A single-row + INSERT statement will be known to produce a new integer primary key + value automatically for this column, which will be accessible + after the statement is invoked via the + :attr:`.CursorResult.inserted_primary_key` attribute upon the + :class:`.Result` object. This also applies towards use of the + ORM when ORM-mapped objects are persisted to the database, + indicating that a new integer primary key will be available to + become part of the :term:`identity key` for that object. + + The parameter may be set to ``True`` to indicate that a column which + is part of a composite (i.e. multi-column) primary key should have autoincrement semantics, though note that only one column within a primary key may have this setting. It can also be set to ``True`` to indicate autoincrement semantics on a @@ -1239,7 +1276,6 @@ def __init__(self, *args, **kwargs): that has an explicit client-side or server-side default, subject to limitations of the backend database and dialect. - The setting *only* has an effect for columns which are: * Integer derived (i.e. INT, SMALLINT, BIGINT). @@ -1259,29 +1295,68 @@ def __init__(self, *args, **kwargs): column that refers to another via foreign key, as such a column is required to refer to a value that originates from elsewhere. 
- The setting has these two effects on columns that meet the + The setting has these effects on columns that meet the above criteria: - * DDL issued for the column will include database-specific + * DDL issued for the column, if the column does not already include + a default generating construct supported by the backend such as + :class:`.Identity`, will include database-specific keywords intended to signify this column as an - "autoincrement" column, such as AUTO INCREMENT on MySQL, - SERIAL on PostgreSQL, and IDENTITY on MS-SQL. It does - *not* issue AUTOINCREMENT for SQLite since this is a - special SQLite flag that is not required for autoincrementing - behavior. - - .. seealso:: - - :ref:`sqlite_autoincrement` - - * The column will be considered to be available using an - "autoincrement" method specific to the backend database, such - as calling upon ``cursor.lastrowid``, using RETURNING in an - INSERT statement to get at a sequence-generated value, or using - special functions such as "SELECT scope_identity()". - These methods are highly specific to the DBAPIs and databases in - use and vary greatly, so care should be taken when associating - ``autoincrement=True`` with a custom default generation function. + "autoincrement" column for specific backends. Behavior for + primary SQLAlchemy dialects includes: + + * AUTO INCREMENT on MySQL and MariaDB + * SERIAL on PostgreSQL + * IDENTITY on MS-SQL - this occurs even without the + :class:`.Identity` construct as the + :paramref:`.Column.autoincrement` parameter pre-dates this + construct. + * SQLite - SQLite integer primary key columns are implicitly + "auto incrementing" and no additional keywords are rendered; + to render the special SQLite keyword ``AUTOINCREMENT`` + is not included as this is unnecessary and not recommended + by the database vendor. See the section + :ref:`sqlite_autoincrement` for more background. 
+ * Oracle - The Oracle dialect has no default "autoincrement" + feature available at this time, instead the :class:`.Identity` + construct is recommended to achieve this (the :class:`.Sequence` + construct may also be used). + * Third-party dialects - consult those dialects' documentation + for details on their specific behaviors. + + * When a single-row :func:`_sql.insert` construct is compiled and + executed, which does not set the :meth:`_sql.Insert.inline` + modifier, newly generated primary key values for this column + will be automatically retrieved upon statement execution + using a method specific to the database driver in use: + + * MySQL, SQLite - calling upon ``cursor.lastrowid()`` + (see + `https://www.python.org/dev/peps/pep-0249/#lastrowid + `_) + * PostgreSQL, SQL Server, Oracle - use RETURNING or an equivalent + construct when rendering an INSERT statement, and then retrieving + the newly generated primary key values after execution + * PostgreSQL, Oracle for :class:`_schema.Table` objects that + set :paramref:`_schema.Table.implicit_returning` to False - + for a :class:`.Sequence` only, the :class:`.Sequence` is invoked + explicitly before the INSERT statement takes place so that the + newly generated primary key value is available to the client + * SQL Server for :class:`_schema.Table` objects that + set :paramref:`_schema.Table.implicit_returning` to False - + the ``SELECT scope_identity()`` construct is used after the + INSERT statement is invoked to retrieve the newly generated + primary key value. + * Third-party dialects - consult those dialects' documentation + for details on their specific behaviors. + + * For multiple-row :func:`_sql.insert` constructs invoked with + a list of parameters (i.e. 
"executemany" semantics), primary-key + retrieving behaviors are generally disabled, however there may + be special APIs that may be used to retrieve lists of new + primary key values for an "executemany", such as the psycopg2 + "fast insertmany" feature. Such features are very new and + may not yet be well covered in documentation. :param default: A scalar, Python callable, or From a334e22bab6194479ca1af1f1f49d012ce918ca6 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 23 Jan 2022 09:28:22 -0500 Subject: [PATCH 105/632] dont test squelched warnings against the filter I spent days on Ibcf09af25228d39ee5a943fda82d8a9302433726 reading it over and over again and noticed this slight inaccuracy 10 seconds after I merged it. the assert_warns_message() and assert_warns() functions should not consider a mismatched warning class as valid for a match. Change-Id: Ib8944dd95bcec1a7e4963917a5f4829e2ba27732 (cherry picked from commit f653d5eb169e3d0371eae388aecb0db0cb0b8c11) --- lib/sqlalchemy/testing/assertions.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/sqlalchemy/testing/assertions.py b/lib/sqlalchemy/testing/assertions.py index aa8edd9affb..9a3c06b0290 100644 --- a/lib/sqlalchemy/testing/assertions.py +++ b/lib/sqlalchemy/testing/assertions.py @@ -192,6 +192,8 @@ def our_warn(msg, *arg, **kw): if not exception or not issubclass(exception, _EXC_CLS): if not squelch_other_warnings: return real_warn(msg, *arg, **kw) + else: + return if not filters and not raise_on_any_unexpected: return From a88b25c59bbffe2e5af5668e695926de73ac21b4 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 23 Jan 2022 09:41:52 -0500 Subject: [PATCH 106/632] fix bullet indentation, clarify insert independent of DDL Change-Id: I3b6f2b7e23044148e837afdbe4fef66773e42777 (cherry picked from commit 3fc5d28b7999cafc5111ce999a218cd5cd9329a2) --- lib/sqlalchemy/sql/schema.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/lib/sqlalchemy/sql/schema.py 
b/lib/sqlalchemy/sql/schema.py index 4e8776617ee..6240357f54e 100644 --- a/lib/sqlalchemy/sql/schema.py +++ b/lib/sqlalchemy/sql/schema.py @@ -1251,7 +1251,11 @@ def __init__(self, *args, **kwargs): :class:`.Result` object. This also applies towards use of the ORM when ORM-mapped objects are persisted to the database, indicating that a new integer primary key will be available to - become part of the :term:`identity key` for that object. + become part of the :term:`identity key` for that object. This + behavior takes place regardless of what DDL constructs are + associated with the :class:`_schema.Column` and is independent + of the "DDL Rendering" behavior discussed in the previous note + above. The parameter may be set to ``True`` to indicate that a column which is part of a composite (i.e. multi-column) primary key should @@ -1350,13 +1354,13 @@ def __init__(self, *args, **kwargs): * Third-party dialects - consult those dialects' documentation for details on their specific behaviors. - * For multiple-row :func:`_sql.insert` constructs invoked with - a list of parameters (i.e. "executemany" semantics), primary-key - retrieving behaviors are generally disabled, however there may - be special APIs that may be used to retrieve lists of new - primary key values for an "executemany", such as the psycopg2 - "fast insertmany" feature. Such features are very new and - may not yet be well covered in documentation. + * For multiple-row :func:`_sql.insert` constructs invoked with + a list of parameters (i.e. "executemany" semantics), primary-key + retrieving behaviors are generally disabled, however there may + be special APIs that may be used to retrieve lists of new + primary key values for an "executemany", such as the psycopg2 + "fast insertmany" feature. Such features are very new and + may not yet be well covered in documentation. 
:param default: A scalar, Python callable, or From 71248e34befe26f3c116dd52d782c519a5022168 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 25 Jan 2022 09:14:32 -0500 Subject: [PATCH 107/632] replace "e.g.::" with more context regarding attribute events example Fixes: #7613 Change-Id: I28a9577587399c41afd53fbd026003667654c3fc (cherry picked from commit 5e3357c70e419c244156ac3885b2cf784b5b3fc0) --- lib/sqlalchemy/orm/events.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/sqlalchemy/orm/events.py b/lib/sqlalchemy/orm/events.py index 1514a2d433d..9a6acbd4225 100644 --- a/lib/sqlalchemy/orm/events.py +++ b/lib/sqlalchemy/orm/events.py @@ -2142,7 +2142,8 @@ class AttributeEvents(event.Events): These are typically defined on the class-bound descriptor for the target class. - e.g.:: + For example, to register a listener that will receive the + :meth:`_orm.AttributeEvents.append` event:: from sqlalchemy import event @@ -2153,7 +2154,8 @@ def my_append_listener(target, value, initiator): Listeners have the option to return a possibly modified version of the value, when the :paramref:`.AttributeEvents.retval` flag is passed to - :func:`.event.listen` or :func:`.event.listens_for`:: + :func:`.event.listen` or :func:`.event.listens_for`, such as below, + illustrated using the :meth:`_orm.AttributeEvents.set` event:: def validate_phone(target, value, oldvalue, initiator): "Strip non-numeric characters from a phone number" From dc5f111bc51bdc0720c2d52547339479e01619a6 Mon Sep 17 00:00:00 2001 From: Gilbert Gilb's Date: Sun, 23 Jan 2022 13:00:35 -0500 Subject: [PATCH 108/632] Add compiler support for PostgreSQL "NOT VALID" constraints. Added compiler support for the PostgreSQL ``NOT VALID`` phrase when rendering DDL for the :class:`.CheckConstraint`, :class:`.ForeignKeyConstraint` and :class:`.ForeignKey` schema constructs. Pull request courtesy Gilbert Gilb's. 
Fixes: #7600 Closes: #7601 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/7601 Pull-request-sha: 78eecd55fd9fad07030d963f5fd6713c4af60e80 Change-Id: I84bfe84596856eeea2bcca45c04ad23d980a75ec (cherry picked from commit 77dd6808f250e0431f9bce824f46f6e1ef63eef3) --- doc/build/changelog/unreleased_14/7600.rst | 12 ++++ lib/sqlalchemy/dialects/postgresql/base.py | 76 +++++++++++++++++++++- test/dialect/postgresql/test_compiler.py | 59 +++++++++++++++++ 3 files changed, 145 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7600.rst diff --git a/doc/build/changelog/unreleased_14/7600.rst b/doc/build/changelog/unreleased_14/7600.rst new file mode 100644 index 00000000000..2f843ea1988 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7600.rst @@ -0,0 +1,12 @@ +.. change:: + :tags: usecase, postgresql + :tickets: 7600 + + Added compiler support for the PostgreSQL ``NOT VALID`` phrase when rendering + DDL for the :class:`.CheckConstraint`, :class:`.ForeignKeyConstraint` + and :class:`.ForeignKey` schema constructs. Pull request courtesy + Gilbert Gilb's. + + .. seealso:: + + :ref:`postgresql_constraint_options` diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index ea31a355266..95c8f416a84 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -1058,7 +1058,54 @@ def set_search_path(dbapi_connection, connection_record): .. seealso:: `PostgreSQL CREATE TABLE options - `_ + `_ - + in the PostgreSQL documentation. + +.. 
_postgresql_constraint_options: + +PostgreSQL Constraint Options +----------------------------- + +The following option(s) are supported by the PostgreSQL dialect in conjunction +with selected constraint constructs: + +* ``NOT VALID``: This option applies towards CHECK and FOREIGN KEY constraints + when the constraint is being added to an existing table via ALTER TABLE, + and has the effect that existing rows are not scanned during the ALTER + operation against the constraint being added. + + When using a SQL migration tool such as `Alembic `_ + that renders ALTER TABLE constructs, the ``postgresql_not_valid`` argument + may be specified as an additional keyword argument within the operation + that creates the constraint, as in the following Alembic example:: + + def update(): + op.create_foreign_key( + "fk_user_address", + "address", + "user", + ["user_id"], + ["id"], + postgresql_not_valid=True + ) + + The keyword is ultimately accepted directly by the + :class:`_schema.CheckConstraint`, :class:`_schema.ForeignKeyConstraint` + and :class:`_schema.ForeignKey` constructs; when using a tool like + Alembic, dialect-specific keyword arguments are passed through to + these constructs from the migration operation directives:: + + CheckConstraint("some_field IS NOT NULL", postgresql_not_valid=True) + + ForeignKeyConstraint(["some_id"], ["some_table.some_id"], postgresql_not_valid=True) + + .. versionadded:: 1.4.32 + + .. seealso:: + + `PostgreSQL ALTER TABLE options + `_ - + in the PostgreSQL documentation. .. 
_postgresql_table_valued_overview: @@ -2585,6 +2632,10 @@ def get_column_specification(self, column, **kwargs): colspec += " NULL" return colspec + def _define_constraint_validity(self, constraint): + not_valid = constraint.dialect_options["postgresql"]["not_valid"] + return " NOT VALID" if not_valid else "" + def visit_check_constraint(self, constraint): if constraint._type_bound: typ = list(constraint.columns)[0].type @@ -2599,7 +2650,16 @@ def visit_check_constraint(self, constraint): "create_constraint=False on this Enum datatype." ) - return super(PGDDLCompiler, self).visit_check_constraint(constraint) + text = super(PGDDLCompiler, self).visit_check_constraint(constraint) + text += self._define_constraint_validity(constraint) + return text + + def visit_foreign_key_constraint(self, constraint): + text = super(PGDDLCompiler, self).visit_foreign_key_constraint( + constraint + ) + text += self._define_constraint_validity(constraint) + return text def visit_drop_table_comment(self, drop): return "COMMENT ON TABLE %s IS NULL" % self.preparer.format_table( @@ -3213,6 +3273,18 @@ class PGDialect(default.DefaultDialect): "inherits": None, }, ), + ( + schema.CheckConstraint, + { + "not_valid": False, + }, + ), + ( + schema.ForeignKeyConstraint, + { + "not_valid": False, + }, + ), ] reflection_options = ("postgresql_ignore_search_path",) diff --git a/test/dialect/postgresql/test_compiler.py b/test/dialect/postgresql/test_compiler.py index 5ada6f592f3..383a77c1d61 100644 --- a/test/dialect/postgresql/test_compiler.py +++ b/test/dialect/postgresql/test_compiler.py @@ -3,6 +3,7 @@ from sqlalchemy import BigInteger from sqlalchemy import bindparam from sqlalchemy import cast +from sqlalchemy import CheckConstraint from sqlalchemy import Column from sqlalchemy import Computed from sqlalchemy import Date @@ -10,6 +11,8 @@ from sqlalchemy import Enum from sqlalchemy import exc from sqlalchemy import Float +from sqlalchemy import ForeignKey +from sqlalchemy import 
ForeignKeyConstraint from sqlalchemy import func from sqlalchemy import Identity from sqlalchemy import Index @@ -857,6 +860,62 @@ def test_drop_index_concurrently(self): schema.DropIndex(idx1), "DROP INDEX test_idx1", dialect=dialect_9_1 ) + def test_create_check_constraint_not_valid(self): + m = MetaData() + + tbl = Table( + "testtbl", + m, + Column("data", Integer), + CheckConstraint("data = 0", postgresql_not_valid=True), + ) + + self.assert_compile( + schema.CreateTable(tbl), + "CREATE TABLE testtbl (data INTEGER, CHECK (data = 0) NOT VALID)", + ) + + def test_create_foreign_key_constraint_not_valid(self): + m = MetaData() + + tbl = Table( + "testtbl", + m, + Column("a", Integer), + Column("b", Integer), + ForeignKeyConstraint( + "b", ["testtbl.a"], postgresql_not_valid=True + ), + ) + + self.assert_compile( + schema.CreateTable(tbl), + "CREATE TABLE testtbl (" + "a INTEGER, " + "b INTEGER, " + "FOREIGN KEY(b) REFERENCES testtbl (a) NOT VALID" + ")", + ) + + def test_create_foreign_key_column_not_valid(self): + m = MetaData() + + tbl = Table( + "testtbl", + m, + Column("a", Integer), + Column("b", ForeignKey("testtbl.a", postgresql_not_valid=True)), + ) + + self.assert_compile( + schema.CreateTable(tbl), + "CREATE TABLE testtbl (" + "a INTEGER, " + "b INTEGER, " + "FOREIGN KEY(b) REFERENCES testtbl (a) NOT VALID" + ")", + ) + def test_exclude_constraint_min(self): m = MetaData() tbl = Table("testtbl", m, Column("room", Integer, primary_key=True)) From bcea1d0e7bc8209247ed98a5b549420294fb0a53 Mon Sep 17 00:00:00 2001 From: Markus Gerstel Date: Wed, 26 Jan 2022 04:56:40 -0500 Subject: [PATCH 109/632] Fix up Python logging metadata Adjusted the logging for key SQLAlchemy components including :class:`_engine.Engine`, :class:`_engine.Connection` to establish an appropriate stack level parameter, so that the Python logging tokens ``funcName`` and ``lineno`` when used in custom logging formatters will report the correct information, which can be useful when filtering 
log output; supported on Python 3.8 and above. Pull request courtesy Markus Gerstel. Fixes: #7612 Closes: #7615 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/7615 Pull-request-sha: cf9567beb06680df320cb12dde1f15baa68e1eb5 Change-Id: Iff23c92ef3453ac93cbd0d190e7efbf8ea4457a2 (cherry picked from commit f24a34140f6007cada900a8ae5ed03fe40ce2631) --- doc/build/changelog/unreleased_14/7612.rst | 11 +++ lib/sqlalchemy/engine/base.py | 6 ++ lib/sqlalchemy/log.py | 4 + test/engine/test_logging.py | 106 +++++++++++++++++++++ 4 files changed, 127 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7612.rst diff --git a/doc/build/changelog/unreleased_14/7612.rst b/doc/build/changelog/unreleased_14/7612.rst new file mode 100644 index 00000000000..c8992045fcf --- /dev/null +++ b/doc/build/changelog/unreleased_14/7612.rst @@ -0,0 +1,11 @@ +.. change:: + :tags: bug, engine + :tickets: 7612 + + Adjusted the logging for key SQLAlchemy components including + :class:`_engine.Engine`, :class:`_engine.Connection` to establish an + appropriate stack level parameter, so that the Python logging tokens + ``funcName`` and ``lineno`` when used in custom logging formatters will + report the correct information, which can be useful when filtering log + output; supported on Python 3.8 and above. Pull request courtesy Markus + Gerstel. 
diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index 0e695f65a55..cf6a14728b1 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -133,6 +133,9 @@ def _log_info(self, message, *arg, **kw): if fmt: message = fmt(message) + if util.py38: + kw["stacklevel"] = 2 + self.engine.logger.info(message, *arg, **kw) def _log_debug(self, message, *arg, **kw): @@ -141,6 +144,9 @@ def _log_debug(self, message, *arg, **kw): if fmt: message = fmt(message) + if util.py38: + kw["stacklevel"] = 2 + self.engine.logger.debug(message, *arg, **kw) @property diff --git a/lib/sqlalchemy/log.py b/lib/sqlalchemy/log.py index 55511c27b82..07c5eff287c 100644 --- a/lib/sqlalchemy/log.py +++ b/lib/sqlalchemy/log.py @@ -21,6 +21,7 @@ import logging import sys +from .util import py38 # set initial level to WARN. This so that # log statements don't occur in the absence of explicit @@ -160,6 +161,9 @@ def log(self, level, msg, *args, **kwargs): selected_level = self.logger.getEffectiveLevel() if level >= selected_level: + if py38: + kwargs["stacklevel"] = kwargs.get("stacklevel", 1) + 1 + self.logger._log(level, msg, args, **kwargs) def isEnabledFor(self, level): diff --git a/test/engine/test_logging.py b/test/engine/test_logging.py index c5f8b69b64d..806336368b1 100644 --- a/test/engine/test_logging.py +++ b/test/engine/test_logging.py @@ -587,6 +587,30 @@ def test_unnamed_logger_echoflags_execute(self): class TransactionContextLoggingTest(fixtures.TestBase): + __only_on__ = "sqlite" + + @testing.fixture() + def plain_assert_buf(self, plain_logging_engine): + buf = logging.handlers.BufferingHandler(100) + for log in [ + logging.getLogger("sqlalchemy.engine"), + ]: + log.addHandler(buf) + + def go(expected): + assert buf.buffer + + buflines = [rec.msg % rec.args for rec in buf.buffer] + + eq_(buflines, expected) + buf.flush() + + yield go + for log in [ + logging.getLogger("sqlalchemy.engine"), + ]: + log.removeHandler(buf) + 
@testing.fixture() def assert_buf(self, logging_engine): buf = logging.handlers.BufferingHandler(100) @@ -616,6 +640,21 @@ def logging_engine(self, testing_engine): e.connect().close() return e + @testing.fixture() + def plain_logging_engine(self, testing_engine): + # deliver an engine with logging using the plain logging API, + # not the echo parameter + log = logging.getLogger("sqlalchemy.engine") + existing_level = log.level + log.setLevel(logging.DEBUG) + + try: + e = testing_engine(future=True) + e.connect().close() + yield e + finally: + log.setLevel(existing_level) + def test_begin_once_block(self, logging_engine, assert_buf): with logging_engine.begin(): pass @@ -670,6 +709,73 @@ def test_commit_as_you_go_block_rollback_autocommit( ] ) + def test_logging_compatibility( + self, plain_assert_buf, plain_logging_engine + ): + """ensure plain logging doesn't produce API errors. + + Added as part of #7612 + + """ + e = plain_logging_engine + + with e.connect() as conn: + result = conn.exec_driver_sql("select 1") + result.all() + + plain_assert_buf( + [ + "BEGIN (implicit)", + "select 1", + "[raw sql] ()", + "Col ('1',)", + "Row (1,)", + "ROLLBACK", + ] + ) + + @testing.requires.python38 + def test_log_messages_have_correct_metadata_plain( + self, plain_logging_engine + ): + """test #7612""" + self._test_log_messages_have_correct_metadata(plain_logging_engine) + + @testing.requires.python38 + def test_log_messages_have_correct_metadata_echo(self, logging_engine): + """test #7612""" + self._test_log_messages_have_correct_metadata(logging_engine) + + def _test_log_messages_have_correct_metadata(self, logging_engine): + buf = logging.handlers.BufferingHandler(100) + log = logging.getLogger("sqlalchemy.engine") + try: + log.addHandler(buf) + + with logging_engine.connect().execution_options( + isolation_level="AUTOCOMMIT" + ) as conn: + conn.begin() + conn.rollback() + finally: + log.removeHandler(buf) + + assert len(buf.buffer) >= 2 + + # log messages must originate 
from functions called 'begin'/'rollback' + logging_functions = {rec.funcName for rec in buf.buffer} + assert any( + "begin" in fn for fn in logging_functions + ), logging_functions + assert any( + "rollback" in fn for fn in logging_functions + ), logging_functions + + # log messages must originate from different lines + log_lines = {rec.lineno for rec in buf.buffer} + assert len(log_lines) > 1, log_lines + buf.flush() + class LoggingTokenTest(fixtures.TestBase): def setup_test(self): From f688a81c479a3b5fa0d355ce17ffa58c363ae0d6 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 30 Jan 2022 14:25:36 -0500 Subject: [PATCH 110/632] split out declarative varieties into the declarative mapping section specific declarative styles like those for dataclasses and attrs should be in the more reference-oriented "declarative_mapping" section rather than the more introduction-oriented mapping_styles. this will also make it easier for us to add still more ways of mapping declaratively for 2.0. for the cherry-pick, also remove the misleading line about stubs. Fixes: #7575 Change-Id: I2700c2c2b34db9680f9cbe6ed6197add773a6a5d (cherry picked from commit 6bfd428eea7bf712d962a0c4c6a7fb15e14955f6) --- doc/build/orm/declarative_mapping.rst | 1 + doc/build/orm/declarative_styles.rst | 484 ++++++++++++++++++++++++++ doc/build/orm/mapping_styles.rst | 427 +---------------------- 3 files changed, 495 insertions(+), 417 deletions(-) create mode 100644 doc/build/orm/declarative_styles.rst diff --git a/doc/build/orm/declarative_mapping.rst b/doc/build/orm/declarative_mapping.rst index 9d2f3af40a2..1bb07e6af4a 100644 --- a/doc/build/orm/declarative_mapping.rst +++ b/doc/build/orm/declarative_mapping.rst @@ -12,6 +12,7 @@ top level introduction. .. 
toctree:: :maxdepth: 3 + declarative_styles declarative_tables declarative_config declarative_mixins diff --git a/doc/build/orm/declarative_styles.rst b/doc/build/orm/declarative_styles.rst new file mode 100644 index 00000000000..2b4178a856f --- /dev/null +++ b/doc/build/orm/declarative_styles.rst @@ -0,0 +1,484 @@ +.. _orm_declarative_styles_toplevel: + +========================== +Declarative Mapping Styles +========================== + +As introduced at :ref:`orm_declarative_mapping`, the **Declarative Mapping** is +the typical way that mappings are constructed in modern SQLAlchemy. This +section will provide an overview of forms that may be used for Declarative +mapper configuration. + + +.. _orm_declarative_generated_base_class: + +Using a Generated Base Class +---------------------------- + +The most common approach is to generate a "base" class using the +:func:`_orm.declarative_base` function:: + + from sqlalchemy.orm import declarative_base + + # declarative base class + Base = declarative_base() + + +The declarative base class may also be created from an existing +:class:`_orm.registry`, by using the :meth:`_orm.registry.generate_base` +method:: + + from sqlalchemy.orm import registry + + reg = registry() + + # declarative base class + Base = reg.generate_base() + +With the declarative base class, new mapped classes are declared as subclasses +of the base:: + + from sqlalchemy import Column, Integer, String, ForeignKey + from sqlalchemy.orm import declarative_base + + # declarative base class + Base = declarative_base() + + # an example mapping using the base + class User(Base): + __tablename__ = 'user' + + id = Column(Integer, primary_key=True) + name = Column(String) + fullname = Column(String) + nickname = Column(String) + +Above, the :func:`_orm.declarative_base` function returns a new base class from +which new classes to be mapped may inherit from, as above a new mapped +class ``User`` is constructed. 
+ +For each subclass constructed, the body of the class then follows the +declarative mapping approach which defines both a :class:`_schema.Table` +as well as a :class:`_orm.Mapper` object behind the scenes which comprise +a full mapping. + +.. seealso:: + + :ref:`orm_declarative_table_config_toplevel` + + :ref:`orm_declarative_mapper_config_toplevel` + + +.. _orm_explicit_declarative_base: + +Creating an Explicit Base Non-Dynamically (for use with mypy, similar) +---------------------------------------------------------------------- + +SQLAlchemy includes a :ref:`Mypy plugin ` that automatically +accommodates for the dynamically generated ``Base`` class delivered by +SQLAlchemy functions like :func:`_orm.declarative_base`. For the **SQLAlchemy +1.4 series only**, this plugin works along with a new set of typing stubs +published at `sqlalchemy2-stubs `_. + +When this plugin is not in use, or when using other :pep:`484` tools which +may not know how to interpret this class, the declarative base class may +be produced in a fully explicit fashion using the +:class:`_orm.DeclarativeMeta` directly as follows:: + + from sqlalchemy.orm import registry + from sqlalchemy.orm.decl_api import DeclarativeMeta + + mapper_registry = registry() + + class Base(metaclass=DeclarativeMeta): + __abstract__ = True + + registry = mapper_registry + metadata = mapper_registry.metadata + + __init__ = mapper_registry.constructor + +The above ``Base`` is equivalent to one created using the +:meth:`_orm.registry.generate_base` method and will be fully understood by +type analysis tools without the use of plugins. + +.. seealso:: + + :ref:`mypy_toplevel` - background on the Mypy plugin which applies the + above structure automatically when running Mypy. + + +.. 
_orm_declarative_decorator: + +Declarative Mapping using a Decorator (no declarative base) +------------------------------------------------------------ + +As an alternative to using the "declarative base" class is to apply +declarative mapping to a class explicitly, using either an imperative technique +similar to that of a "classical" mapping, or more succinctly by using +a decorator. The :meth:`_orm.registry.mapped` function is a class decorator +that can be applied to any Python class with no hierarchy in place. The +Python class otherwise is configured in declarative style normally:: + + from sqlalchemy import Column, Integer, String, Text, ForeignKey + + from sqlalchemy.orm import registry + from sqlalchemy.orm import relationship + + mapper_registry = registry() + + @mapper_registry.mapped + class User: + __tablename__ = 'user' + + id = Column(Integer, primary_key=True) + name = Column(String) + + addresses = relationship("Address", back_populates="user") + + @mapper_registry.mapped + class Address: + __tablename__ = 'address' + + id = Column(Integer, primary_key=True) + user_id = Column(ForeignKey("user.id")) + email_address = Column(String) + + user = relationship("User", back_populates="addresses") + +Above, the same :class:`_orm.registry` that we'd use to generate a declarative +base class via its :meth:`_orm.registry.generate_base` method may also apply +a declarative-style mapping to a class without using a base. When using +the above style, the mapping of a particular class will **only** proceed +if the decorator is applied to that class directly. 
For inheritance +mappings, the decorator should be applied to each subclass:: + + from sqlalchemy.orm import registry + mapper_registry = registry() + + @mapper_registry.mapped + class Person: + __tablename__ = "person" + + person_id = Column(Integer, primary_key=True) + type = Column(String, nullable=False) + + __mapper_args__ = { + + "polymorphic_on": type, + "polymorphic_identity": "person" + } + + + @mapper_registry.mapped + class Employee(Person): + __tablename__ = "employee" + + person_id = Column(ForeignKey("person.person_id"), primary_key=True) + + __mapper_args__ = { + "polymorphic_identity": "employee" + } + +Both the "declarative table" and "imperative table" styles of declarative +mapping may be used with the above mapping style. + +The decorator form of mapping is particularly useful when combining a +SQLAlchemy declarative mapping with other forms of class declaration, notably +the Python ``dataclasses`` module. See the next section. + +.. _orm_declarative_dataclasses: + +Declarative Mapping with Dataclasses and Attrs +---------------------------------------------- + +The dataclasses_ module, added in Python 3.7, provides a ``@dataclass`` class +decorator to automatically generate boilerplate definitions of ``__init__()``, +``__eq__()``, ``__repr()__``, etc. methods. Another very popular library that does +the same, and much more, is attrs_. Both libraries make use of class +decorators in order to scan a class for attributes that define the class' +behavior, which are then used to generate methods, documentation, and annotations. + +The :meth:`_orm.registry.mapped` class decorator allows the declarative mapping +of a class to occur after the class has been fully constructed, allowing the +class to be processed by other class decorators first. 
The ``@dataclass``
+and ``@attr.s`` decorators may therefore be applied first before the
+ORM mapping process proceeds via the :meth:`_orm.registry.mapped` decorator
+or via the :meth:`_orm.registry.map_imperatively` method discussed in a
+later section.
+
+Mapping with ``@dataclass`` or ``@attr.s`` may be used in a straightforward
+way with :ref:`orm_imperative_table_configuration` style, where
+the :class:`_schema.Table` object is defined separately and
+associated with the class via the ``__table__`` attribute. For dataclasses
+specifically, :ref:`orm_declarative_table` is also supported.
+
+.. versionadded:: 1.4.0b2 Added support for full declarative mapping when using
+   dataclasses.
+
+When attributes are defined using ``dataclasses``, the ``@dataclass``
+decorator consumes them but leaves them in place on the class.
+SQLAlchemy's mapping process, when it encounters an attribute that normally
+is to be mapped to a :class:`_schema.Column`, checks explicitly if the
+attribute is part of a Dataclasses setup, and if so will **replace**
+the class-bound dataclass attribute with its usual mapped
+properties. The ``__init__`` method created by ``@dataclass`` is left
+intact. In contrast, the ``@attr.s`` decorator actually removes its
+own class-bound attributes after the decorator runs, so that SQLAlchemy's
+mapping process takes over these attributes without any issue.
+
+.. versionadded:: 1.4 Added support for direct mapping of Python dataclasses,
+   where the :class:`_orm.Mapper` will now detect attributes that are specific
+   to the ``@dataclasses`` module and replace them at mapping time, rather
+   than skipping them as is the default behavior for any class attribute
+   that's not part of the mapping.
+
+.. 
_orm_declarative_dataclasses_imperative_table: + +Example One - Dataclasses with Imperative Table +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +An example of a mapping using ``@dataclass`` using +:ref:`orm_imperative_table_configuration` is as follows:: + + from __future__ import annotations + + from dataclasses import dataclass + from dataclasses import field + from typing import List + from typing import Optional + + from sqlalchemy import Column + from sqlalchemy import ForeignKey + from sqlalchemy import Integer + from sqlalchemy import String + from sqlalchemy import Table + from sqlalchemy.orm import registry + from sqlalchemy.orm import relationship + + mapper_registry = registry() + + + @mapper_registry.mapped + @dataclass + class User: + __table__ = Table( + "user", + mapper_registry.metadata, + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("fullname", String(50)), + Column("nickname", String(12)), + ) + id: int = field(init=False) + name: Optional[str] = None + fullname: Optional[str] = None + nickname: Optional[str] = None + addresses: List[Address] = field(default_factory=list) + + __mapper_args__ = { # type: ignore + "properties" : { + "addresses": relationship("Address") + } + } + + @mapper_registry.mapped + @dataclass + class Address: + __table__ = Table( + "address", + mapper_registry.metadata, + Column("id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.id")), + Column("email_address", String(50)), + ) + id: int = field(init=False) + user_id: int = field(init=False) + email_address: Optional[str] = None + +In the above example, the ``User.id``, ``Address.id``, and ``Address.user_id`` +attributes are defined as ``field(init=False)``. This means that parameters for +these won't be added to ``__init__()`` methods, but +:class:`.Session` will still be able to set them after getting their values +during flush from autoincrement or other default value generator. 
To +allow them to be specified in the constructor explicitly, they would instead +be given a default value of ``None``. + +For a :func:`_orm.relationship` to be declared separately, it needs to be +specified directly within the :paramref:`_orm.Mapper.properties` dictionary +which itself is specified within the ``__mapper_args__`` dictionary, so that it +is passed to the constructor for :class:`_orm.Mapper`. An alternative to this +approach is in the next example. + +.. _orm_declarative_dataclasses_declarative_table: + +Example Two - Dataclasses with Declarative Table +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The fully declarative approach requires that :class:`_schema.Column` objects +are declared as class attributes, which when using dataclasses would conflict +with the dataclass-level attributes. An approach to combine these together +is to make use of the ``metadata`` attribute on the ``dataclass.field`` +object, where SQLAlchemy-specific mapping information may be supplied. +Declarative supports extraction of these parameters when the class +specifies the attribute ``__sa_dataclass_metadata_key__``. 
This also +provides a more succinct method of indicating the :func:`_orm.relationship` +association:: + + + from __future__ import annotations + + from dataclasses import dataclass + from dataclasses import field + from typing import List + + from sqlalchemy import Column + from sqlalchemy import ForeignKey + from sqlalchemy import Integer + from sqlalchemy import String + from sqlalchemy.orm import registry + from sqlalchemy.orm import relationship + + mapper_registry = registry() + + + @mapper_registry.mapped + @dataclass + class User: + __tablename__ = "user" + + __sa_dataclass_metadata_key__ = "sa" + id: int = field( + init=False, metadata={"sa": Column(Integer, primary_key=True)} + ) + name: str = field(default=None, metadata={"sa": Column(String(50))}) + fullname: str = field(default=None, metadata={"sa": Column(String(50))}) + nickname: str = field(default=None, metadata={"sa": Column(String(12))}) + addresses: List[Address] = field( + default_factory=list, metadata={"sa": relationship("Address")} + ) + + + @mapper_registry.mapped + @dataclass + class Address: + __tablename__ = "address" + __sa_dataclass_metadata_key__ = "sa" + id: int = field( + init=False, metadata={"sa": Column(Integer, primary_key=True)} + ) + user_id: int = field( + init=False, metadata={"sa": Column(ForeignKey("user.id"))} + ) + email_address: str = field( + default=None, metadata={"sa": Column(String(50))} + ) + +.. _orm_declarative_dataclasses_mixin: + +Using Declarative Mixins with Dataclasses +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In the section :ref:`orm_mixins_toplevel`, Declarative Mixin classes +are introduced. 
One requirement of declarative mixins is that certain +constructs that can't be easily duplicated must be given as callables, +using the :class:`_orm.declared_attr` decorator, such as in the +example at :ref:`orm_declarative_mixins_relationships`:: + + class RefTargetMixin: + @declared_attr + def target_id(cls): + return Column('target_id', ForeignKey('target.id')) + + @declared_attr + def target(cls): + return relationship("Target") + +This form is supported within the Dataclasses ``field()`` object by using +a lambda to indicate the SQLAlchemy construct inside the ``field()``. +Using :func:`_orm.declared_attr` to surround the lambda is optional. +If we wanted to produce our ``User`` class above where the ORM fields +came from a mixin that is itself a dataclass, the form would be:: + + @dataclass + class UserMixin: + __tablename__ = "user" + + __sa_dataclass_metadata_key__ = "sa" + + id: int = field( + init=False, metadata={"sa": Column(Integer, primary_key=True)} + ) + + addresses: List[Address] = field( + default_factory=list, metadata={"sa": lambda: relationship("Address")} + ) + + @dataclass + class AddressMixin: + __tablename__ = "address" + __sa_dataclass_metadata_key__ = "sa" + id: int = field( + init=False, metadata={"sa": Column(Integer, primary_key=True)} + ) + user_id: int = field( + init=False, metadata={"sa": lambda: Column(ForeignKey("user.id"))} + ) + email_address: str = field( + default=None, metadata={"sa": Column(String(50))} + ) + + @mapper_registry.mapped + class User(UserMixin): + pass + + @mapper_registry.mapped + class Address(AddressMixin): + pass + +.. versionadded:: 1.4.2 Added support for "declared attr" style mixin attributes, + namely :func:`_orm.relationship` constructs as well as :class:`_schema.Column` + objects with foreign key declarations, to be used within "Dataclasses + with Declarative Table" style mappings. + +.. 
_orm_declarative_attrs_imperative_table: + +Example Three - attrs with Imperative Table +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +A mapping using ``@attr.s``, in conjunction with imperative table:: + + import attr + + # other imports + + from sqlalchemy.orm import registry + + mapper_registry = registry() + + + @mapper_registry.mapped + @attr.s + class User: + __table__ = Table( + "user", + mapper_registry.metadata, + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("fullname", String(50)), + Column("nickname", String(12)), + ) + id = attr.ib() + name = attr.ib() + fullname = attr.ib() + nickname = attr.ib() + addresses = attr.ib() + + # other classes... + +``@dataclass`` and attrs_ mappings may also be used with classical mappings, i.e. +with the :meth:`_orm.registry.map_imperatively` function. See the section +:ref:`orm_imperative_dataclasses` for a similar example. + +.. _dataclasses: https://docs.python.org/3/library/dataclasses.html +.. _attrs: https://pypi.org/project/attrs/ diff --git a/doc/build/orm/mapping_styles.rst b/doc/build/orm/mapping_styles.rst index e643cfce633..c5791b11cfa 100644 --- a/doc/build/orm/mapping_styles.rst +++ b/doc/build/orm/mapping_styles.rst @@ -88,7 +88,12 @@ constructor, and then generating a base class using the Base = mapper_registry.generate_base() The :class:`_orm.registry` is used directly in order to access a variety -of mapping styles to suit different use cases: +of mapping styles to suit different use cases. The primary mapping styles +offered by :class:`_orm.registry` are further detailed in the following +sections: + +* :ref:`orm_declarative_generated_base_class` - declarative mapping using a + base class generated by the :class:`_orm.registry` object. * :ref:`orm_declarative_decorator` - declarative mapping using a decorator, rather than a base class. @@ -100,422 +105,11 @@ Documentation for Declarative mapping continues at :ref:`declarative_config_topl .. 
seealso:: - :ref:`declarative_config_toplevel` - -.. _orm_explicit_declarative_base: - -Creating an Explicit Base Non-Dynamically (for use with mypy, similar) ----------------------------------------------------------------------- - -SQLAlchemy includes a :ref:`Mypy plugin ` that automatically -accommodates for the dynamically generated ``Base`` class -delivered by SQLAlchemy functions like :func:`_orm.declarative_base`. -This plugin works along with a new set of typing stubs published at -`sqlalchemy2-stubs `_. - -When this plugin is not in use, or when using other :pep:`484` tools which -may not know how to interpret this class, the declarative base class may -be produced in a fully explicit fashion using the -:class:`_orm.DeclarativeMeta` directly as follows:: - - from sqlalchemy.orm import registry - from sqlalchemy.orm.decl_api import DeclarativeMeta - - mapper_registry = registry() - - class Base(metaclass=DeclarativeMeta): - __abstract__ = True - - # these are supplied by the sqlalchemy2-stubs, so may be omitted - # when they are installed - registry = mapper_registry - metadata = mapper_registry.metadata - - __init__ = mapper_registry.constructor - -The above ``Base`` is equivalent to one created using the -:meth:`_orm.registry.generate_base` method and will be fully understood by -type analysis tools without the use of plugins. - -.. seealso:: - - :ref:`mypy_toplevel` - background on the Mypy plugin which applies the - above structure automatically when running Mypy. - - -.. _orm_declarative_decorator: - -Declarative Mapping using a Decorator (no declarative base) ------------------------------------------------------------- - -As an alternative to using the "declarative base" class is to apply -declarative mapping to a class explicitly, using either an imperative technique -similar to that of a "classical" mapping, or more succinctly by using -a decorator. 
The :meth:`_orm.registry.mapped` function is a class decorator -that can be applied to any Python class with no hierarchy in place. The -Python class otherwise is configured in declarative style normally:: - - from sqlalchemy import Column, Integer, String, Text, ForeignKey - - from sqlalchemy.orm import registry - from sqlalchemy.orm import relationship - - mapper_registry = registry() - - @mapper_registry.mapped - class User: - __tablename__ = 'user' - - id = Column(Integer, primary_key=True) - name = Column(String) - - addresses = relationship("Address", back_populates="user") - - @mapper_registry.mapped - class Address: - __tablename__ = 'address' - - id = Column(Integer, primary_key=True) - user_id = Column(ForeignKey("user.id")) - email_address = Column(String) - - user = relationship("User", back_populates="addresses") - -Above, the same :class:`_orm.registry` that we'd use to generate a declarative -base class via its :meth:`_orm.registry.generate_base` method may also apply -a declarative-style mapping to a class without using a base. When using -the above style, the mapping of a particular class will **only** proceed -if the decorator is applied to that class directly. For inheritance -mappings, the decorator should be applied to each subclass:: - - from sqlalchemy.orm import registry - mapper_registry = registry() - - @mapper_registry.mapped - class Person: - __tablename__ = "person" - - person_id = Column(Integer, primary_key=True) - type = Column(String, nullable=False) - - __mapper_args__ = { - - "polymorphic_on": type, - "polymorphic_identity": "person" - } - - - @mapper_registry.mapped - class Employee(Person): - __tablename__ = "employee" - - person_id = Column(ForeignKey("person.person_id"), primary_key=True) - - __mapper_args__ = { - "polymorphic_identity": "employee" - } - -Both the "declarative table" and "imperative table" styles of declarative -mapping may be used with the above mapping style. 
- -The decorator form of mapping is particularly useful when combining a -SQLAlchemy declarative mapping with other forms of class declaration, notably -the Python ``dataclasses`` module. See the next section. - -.. _orm_declarative_dataclasses: - -Declarative Mapping with Dataclasses and Attrs ----------------------------------------------- + * :ref:`declarative_config_toplevel` -The dataclasses_ module, added in Python 3.7, provides a ``@dataclass`` class -decorator to automatically generate boilerplate definitions of ``__init__()``, -``__eq__()``, ``__repr()__``, etc. methods. Another very popular library that does -the same, and much more, is attrs_. Both libraries make use of class -decorators in order to scan a class for attributes that define the class' -behavior, which are then used to generate methods, documentation, and annotations. - -The :meth:`_orm.registry.mapped` class decorator allows the declarative mapping -of a class to occur after the class has been fully constructed, allowing the -class to be processed by other class decorators first. The ``@dataclass`` -and ``@attr.s`` decorators may therefore be applied first before the -ORM mapping process proceeds via the :meth:`_orm.registry.mapped` decorator -or via the :meth:`_orm.registry.map_imperatively` method discussed in a -later section. - -Mapping with ``@dataclass`` or ``@attr.s`` may be used in a straightforward -way with :ref:`orm_imperative_table_configuration` style, where the -the :class:`_schema.Table`, which means that it is defined separately and -associated with the class via the ``__table__``. For dataclasses specifically, -:ref:`orm_declarative_table` is also supported. - -.. versionadded:: 1.4.0b2 Added support for full declarative mapping when using - dataclasses. - -When attributes are defined using ``dataclasses``, the ``@dataclass`` -decorator consumes them but leaves them in place on the class. 
-SQLAlchemy's mapping process, when it encounters an attribute that normally -is to be mapped to a :class:`_schema.Column`, checks explicitly if the -attribute is part of a Dataclasses setup, and if so will **replace** -the class-bound dataclass attribute with its usual mapped -properties. The ``__init__`` method created by ``@dataclass`` is left -intact. In contrast, the ``@attr.s`` decorator actually removes its -own class-bound attributes after the decorator runs, so that SQLAlchemy's -mapping process takes over these attributes without any issue. - -.. versionadded:: 1.4 Added support for direct mapping of Python dataclasses, - where the :class:`_orm.Mapper` will now detect attributes that are specific - to the ``@dataclasses`` module and replace them at mapping time, rather - than skipping them as is the default behavior for any class attribute - that's not part of the mapping. - -.. _orm_declarative_dataclasses_imperative_table: - -Example One - Dataclasses with Imperative Table -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -An example of a mapping using ``@dataclass`` using -:ref:`orm_imperative_table_configuration` is as follows:: - - from __future__ import annotations - - from dataclasses import dataclass - from dataclasses import field - from typing import List - from typing import Optional - - from sqlalchemy import Column - from sqlalchemy import ForeignKey - from sqlalchemy import Integer - from sqlalchemy import String - from sqlalchemy import Table - from sqlalchemy.orm import registry - from sqlalchemy.orm import relationship - - mapper_registry = registry() - - - @mapper_registry.mapped - @dataclass - class User: - __table__ = Table( - "user", - mapper_registry.metadata, - Column("id", Integer, primary_key=True), - Column("name", String(50)), - Column("fullname", String(50)), - Column("nickname", String(12)), - ) - id: int = field(init=False) - name: Optional[str] = None - fullname: Optional[str] = None - nickname: Optional[str] = None - 
addresses: List[Address] = field(default_factory=list) - - __mapper_args__ = { # type: ignore - "properties" : { - "addresses": relationship("Address") - } - } - - @mapper_registry.mapped - @dataclass - class Address: - __table__ = Table( - "address", - mapper_registry.metadata, - Column("id", Integer, primary_key=True), - Column("user_id", Integer, ForeignKey("user.id")), - Column("email_address", String(50)), - ) - id: int = field(init=False) - user_id: int = field(init=False) - email_address: Optional[str] = None - -In the above example, the ``User.id``, ``Address.id``, and ``Address.user_id`` -attributes are defined as ``field(init=False)``. This means that parameters for -these won't be added to ``__init__()`` methods, but -:class:`.Session` will still be able to set them after getting their values -during flush from autoincrement or other default value generator. To -allow them to be specified in the constructor explicitly, they would instead -be given a default value of ``None``. - -For a :func:`_orm.relationship` to be declared separately, it needs to -be specified directly within the :paramref:`_orm.mapper.properties` -dictionary passed to the :func:`_orm.mapper`. An alternative to this -approach is in the next example. - -.. _orm_declarative_dataclasses_declarative_table: - -Example Two - Dataclasses with Declarative Table -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The fully declarative approach requires that :class:`_schema.Column` objects -are declared as class attributes, which when using dataclasses would conflict -with the dataclass-level attributes. An approach to combine these together -is to make use of the ``metadata`` attribute on the ``dataclass.field`` -object, where SQLAlchemy-specific mapping information may be supplied. -Declarative supports extraction of these parameters when the class -specifies the attribute ``__sa_dataclass_metadata_key__``. 
This also -provides a more succinct method of indicating the :func:`_orm.relationship` -association:: - - - from __future__ import annotations - - from dataclasses import dataclass - from dataclasses import field - from typing import List - - from sqlalchemy import Column - from sqlalchemy import ForeignKey - from sqlalchemy import Integer - from sqlalchemy import String - from sqlalchemy.orm import registry - from sqlalchemy.orm import relationship - - mapper_registry = registry() - - - @mapper_registry.mapped - @dataclass - class User: - __tablename__ = "user" - - __sa_dataclass_metadata_key__ = "sa" - id: int = field( - init=False, metadata={"sa": Column(Integer, primary_key=True)} - ) - name: str = field(default=None, metadata={"sa": Column(String(50))}) - fullname: str = field(default=None, metadata={"sa": Column(String(50))}) - nickname: str = field(default=None, metadata={"sa": Column(String(12))}) - addresses: List[Address] = field( - default_factory=list, metadata={"sa": relationship("Address")} - ) - - - @mapper_registry.mapped - @dataclass - class Address: - __tablename__ = "address" - __sa_dataclass_metadata_key__ = "sa" - id: int = field( - init=False, metadata={"sa": Column(Integer, primary_key=True)} - ) - user_id: int = field( - init=False, metadata={"sa": Column(ForeignKey("user.id"))} - ) - email_address: str = field( - default=None, metadata={"sa": Column(String(50))} - ) - -.. _orm_declarative_dataclasses_mixin: - -Using Declarative Mixins with Dataclasses -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In the section :ref:`orm_mixins_toplevel`, Declarative Mixin classes -are introduced. 
One requirement of declarative mixins is that certain -constructs that can't be easily duplicated must be given as callables, -using the :class:`_orm.declared_attr` decorator, such as in the -example at :ref:`orm_declarative_mixins_relationships`:: - - class RefTargetMixin(object): - @declared_attr - def target_id(cls): - return Column('target_id', ForeignKey('target.id')) - - @declared_attr - def target(cls): - return relationship("Target") - -This form is supported within the Dataclasses ``field()`` object by using -a lambda to indicate the SQLAlchemy construct inside the ``field()``. -Using :func:`_orm.declared_attr` to surround the lambda is optional. -If we wanted to produce our ``User`` class above where the ORM fields -came from a mixin that is itself a dataclass, the form would be:: - - @dataclass - class UserMixin: - __tablename__ = "user" - - __sa_dataclass_metadata_key__ = "sa" - - id: int = field( - init=False, metadata={"sa": Column(Integer, primary_key=True)} - ) - - addresses: List[Address] = field( - default_factory=list, metadata={"sa": lambda: relationship("Address")} - ) - - @dataclass - class AddressMixin: - __tablename__ = "address" - __sa_dataclass_metadata_key__ = "sa" - id: int = field( - init=False, metadata={"sa": Column(Integer, primary_key=True)} - ) - user_id: int = field( - init=False, metadata={"sa": lambda: Column(ForeignKey("user.id"))} - ) - email_address: str = field( - default=None, metadata={"sa": Column(String(50))} - ) - - @mapper_registry.mapped - class User(UserMixin): - pass - - @mapper_registry.mapped - class Address(AddressMixin): - pass - -.. versionadded:: 1.4.2 Added support for "declared attr" style mixin attributes, - namely :func:`_orm.relationship` constructs as well as :class:`_schema.Column` - objects with foreign key declarations, to be used within "Dataclasses - with Declarative Table" style mappings. - -.. 
_orm_declarative_attrs_imperative_table: - -Example Three - attrs with Imperative Table -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -A mapping using ``@attr.s``, in conjunction with imperative table:: - - import attr - - # other imports - - from sqlalchemy.orm import registry - - mapper_registry = registry() - - - @mapper_registry.mapped - @attr.s - class User: - __table__ = Table( - "user", - mapper_registry.metadata, - Column("id", Integer, primary_key=True), - Column("name", String(50)), - Column("fullname", String(50)), - Column("nickname", String(12)), - ) - id = attr.ib() - name = attr.ib() - fullname = attr.ib() - nickname = attr.ib() - addresses = attr.ib() - - # other classes... - -``@dataclass`` and attrs_ mappings may also be used with classical mappings, i.e. -with the :meth:`_orm.registry.map_imperatively` function. See the section -:ref:`orm_imperative_dataclasses` for a similar example. - -.. _dataclasses: https://docs.python.org/3/library/dataclasses.html -.. _attrs: https://pypi.org/project/attrs/ + * :ref:`orm_declarative_styles_toplevel` + * :ref:`orm_declarative_table_config_toplevel` + * :ref:`orm_declarative_mapper_config_toplevel` .. _orm_imperative_mapping: @@ -562,7 +156,6 @@ the :meth:`_orm.registry.map_imperatively` method:: mapper_registry.map_imperatively(User, user_table) - Information about mapped attributes, such as relationships to other classes, are provided via the ``properties`` dictionary. 
The example below illustrates a second :class:`_schema.Table` object, mapped to a class called ``Address``, then linked to ``User`` via :func:`_orm.relationship`:: From 2eac6545ad08db83954dd3afebf4894a0acb0cea Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 31 Jan 2022 09:50:57 -0500 Subject: [PATCH 111/632] add pg14 to supported implementations Fixes: #7647 Change-Id: I071f1a53714ebb0dc838fddc665640d46666318f (cherry picked from commit 5087dd2a02c6a142146944126022cec735a6f54d) --- lib/sqlalchemy/dialects/postgresql/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index 95c8f416a84..c80f6ec63c6 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -8,7 +8,7 @@ r""" .. dialect:: postgresql :name: PostgreSQL - :full_support: 9.6, 10, 11, 12, 13 + :full_support: 9.6, 10, 11, 12, 13, 14 :normal_support: 9.6+ :best_effort: 8+ From 465359500ade6e1b41d54a1086eb18e3bda99368 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 3 Feb 2022 21:58:14 -0500 Subject: [PATCH 112/632] fall back to SHOW VARIABLES for MySQL < 5.6 Fixed regression caused by :ticket:`7518` where changing the syntax "SHOW VARIABLES" to "SELECT @@" broke compatibility with MySQL versions older than 5.6, including early 5.0 releases. While these are very old MySQL versions, a change in compatibility was not planned, so version-specific logic has been restored to fall back to "SHOW VARIABLES" for MySQL server versions < 5.6. 
includes unrelated orm/test_expire ordering issue , only showing up on 1.4 / py2.7 but seems to be passing by luck otherwise Fixes: #7518 Change-Id: Ia554080af742f2c3437f88cf3f7a4827b5e55da8 (cherry picked from commit 9f1ed1c68af05eab5851ffd038011e3e3bd36b63) --- doc/build/changelog/unreleased_14/7518.rst | 10 ++++ lib/sqlalchemy/dialects/mysql/base.py | 53 +++++++++++------ lib/sqlalchemy/dialects/mysql/pyodbc.py | 4 +- test/dialect/mysql/test_dialect.py | 69 +++++++++++++++++++++- test/orm/test_expire.py | 6 +- 5 files changed, 118 insertions(+), 24 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7518.rst diff --git a/doc/build/changelog/unreleased_14/7518.rst b/doc/build/changelog/unreleased_14/7518.rst new file mode 100644 index 00000000000..bb5a9bc21b7 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7518.rst @@ -0,0 +1,10 @@ +.. change:: + :tags: bug, mysql, regression + :tickets: 7518 + + Fixed regression caused by :ticket:`7518` where changing the syntax "SHOW + VARIABLES" to "SELECT @@" broke compatibility with MySQL versions older + than 5.6, including early 5.0 releases. While these are very old MySQL + versions, a change in compatibility was not planned, so version-specific + logic has been restored to fall back to "SHOW VARIABLES" for MySQL server + versions < 5.6. diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index 3fa1204207b..260c147ddfe 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -2704,9 +2704,18 @@ def get_sequence_names(self, connection, schema=None, **kw): ] def initialize(self, connection): + # this is driver-based, does not need server version info + # and is fairly critical for even basic SQL operations self._connection_charset = self._detect_charset(connection) + + # call super().initialize() because we need to have + # server_version_info set up. 
in 1.4 under python 2 only this does the + # "check unicode returns" thing, which is the one area that some + # SQL gets compiled within initialize() currently + default.DefaultDialect.initialize(self, connection) + self._detect_sql_mode(connection) - self._detect_ansiquotes(connection) + self._detect_ansiquotes(connection) # depends on sql mode self._detect_casing(connection) if self._server_ansiquotes: # if ansiquotes == True, build a new IdentifierPreparer @@ -2715,8 +2724,6 @@ def initialize(self, connection): self, server_ansiquotes=self._server_ansiquotes ) - default.DefaultDialect.initialize(self, connection) - self.supports_sequences = ( self.is_mariadb and self.server_version_info >= (10, 3) ) @@ -3102,6 +3109,23 @@ def _setup_parser(self, connection, table_name, schema=None, **kw): sql = parser._describe_to_create(table_name, columns) return parser.parse(sql, charset) + def _fetch_setting(self, connection, setting_name): + charset = self._connection_charset + + if self.server_version_info and self.server_version_info < (5, 6): + sql = "SHOW VARIABLES LIKE '%s'" % setting_name + fetch_col = 1 + else: + sql = "SELECT @@%s" % setting_name + fetch_col = 0 + + show_var = connection.exec_driver_sql(sql) + row = self._compat_first(show_var, charset=charset) + if not row: + return None + else: + return row[fetch_col] + def _detect_charset(self, connection): raise NotImplementedError() @@ -3114,22 +3138,18 @@ def _detect_casing(self, connection): """ # https://dev.mysql.com/doc/refman/en/identifier-case-sensitivity.html - charset = self._connection_charset - show_var = connection.exec_driver_sql( - "SELECT @@lower_case_table_names" - ) - row = self._compat_first(show_var, charset=charset) - if not row: + setting = self._fetch_setting(connection, "lower_case_table_names") + if setting is None: cs = 0 else: # 4.0.15 returns OFF or ON according to [ticket:489] # 3.23 doesn't, 4.0.27 doesn't.. 
- if row[0] == "OFF": + if setting == "OFF": cs = 0 - elif row[0] == "ON": + elif setting == "ON": cs = 1 else: - cs = int(row[0]) + cs = int(setting) self._casing = cs return cs @@ -3147,19 +3167,16 @@ def _detect_collations(self, connection): return collations def _detect_sql_mode(self, connection): - row = self._compat_first( - connection.exec_driver_sql("SELECT @@sql_mode"), - charset=self._connection_charset, - ) + setting = self._fetch_setting(connection, "sql_mode") - if not row: + if setting is None: util.warn( "Could not retrieve SQL_MODE; please ensure the " "MySQL user has permissions to SHOW VARIABLES" ) self._sql_mode = "" else: - self._sql_mode = row[0] or "" + self._sql_mode = setting or "" def _detect_ansiquotes(self, connection): """Detect and adjust for the ANSI_QUOTES sql mode.""" diff --git a/lib/sqlalchemy/dialects/mysql/pyodbc.py b/lib/sqlalchemy/dialects/mysql/pyodbc.py index d5a5c0c9dc4..22d60bd1535 100644 --- a/lib/sqlalchemy/dialects/mysql/pyodbc.py +++ b/lib/sqlalchemy/dialects/mysql/pyodbc.py @@ -89,9 +89,7 @@ def _detect_charset(self, connection): # If it's decided that issuing that sort of SQL leaves you SOL, then # this can prefer the driver value. 
try: - value = connection.exec_driver_sql( - "select @@character_set_client" - ).scalar() + value = self._fetch_setting("character_set_client") if value: return value except exc.DBAPIError: diff --git a/test/dialect/mysql/test_dialect.py b/test/dialect/mysql/test_dialect.py index 016014d5421..1b34af05390 100644 --- a/test/dialect/mysql/test_dialect.py +++ b/test/dialect/mysql/test_dialect.py @@ -5,6 +5,7 @@ from sqlalchemy import bindparam from sqlalchemy import Column from sqlalchemy import DateTime +from sqlalchemy import event from sqlalchemy import exc from sqlalchemy import func from sqlalchemy import Integer @@ -33,6 +34,35 @@ class BackendDialectTest( __backend__ = True __only_on__ = "mysql", "mariadb" + @testing.fixture + def mysql_version_dialect(self, testing_engine): + """yield a MySQL engine that will simulate a specific version. + + patches out various methods to not fail + + """ + engine = testing_engine() + _server_version = [None] + with mock.patch.object( + engine.dialect, + "_get_server_version_info", + lambda conn: engine.dialect._parse_server_version( + _server_version[0] + ), + ), mock.patch.object( + engine.dialect, "_set_mariadb", lambda *arg: None + ), mock.patch.object( + engine.dialect, + "get_isolation_level", + lambda *arg: "REPEATABLE READ", + ): + + def go(server_version): + _server_version[0] = server_version + return engine + + yield go + def test_reserved_words_mysql_vs_mariadb( self, mysql_mariadb_reserved_words ): @@ -55,7 +85,6 @@ def test_reserved_words_mysql_vs_mariadb( ) def test_no_show_variables(self): - from sqlalchemy.testing import mock engine = engines.testing_engine() @@ -75,7 +104,6 @@ def my_execute(self, statement, *args, **kw): engine.connect() def test_no_default_isolation_level(self): - from sqlalchemy.testing import mock engine = engines.testing_engine() @@ -100,6 +128,43 @@ def fake_isolation_level(connection): ): engine.connect() + @testing.combinations( + "10.5.12-MariaDB", "5.6.49", "5.0.2", 
argnames="server_version" + ) + def test_variable_fetch(self, mysql_version_dialect, server_version): + """test #7518""" + engine = mysql_version_dialect(server_version) + + fetches = [] + + # the initialize() connection does not seem to use engine-level events. + # not changing that here + + @event.listens_for(engine, "do_execute_no_params") + @event.listens_for(engine, "do_execute") + def do_execute_no_params(cursor, statement, *arg): + if statement.startswith("SHOW VARIABLES") or statement.startswith( + "SELECT @@" + ): + fetches.append(statement) + return None + + engine.connect() + + if server_version == "5.0.2": + eq_( + fetches, + [ + "SHOW VARIABLES LIKE 'sql_mode'", + "SHOW VARIABLES LIKE 'lower_case_table_names'", + ], + ) + else: + eq_( + fetches, + ["SELECT @@sql_mode", "SELECT @@lower_case_table_names"], + ) + def test_autocommit_isolation_level(self): c = testing.db.connect().execution_options( isolation_level="AUTOCOMMIT" diff --git a/test/orm/test_expire.py b/test/orm/test_expire.py index 3fba0b446f9..5c958b15096 100644 --- a/test/orm/test_expire.py +++ b/test/orm/test_expire.py @@ -1079,7 +1079,11 @@ def test_partial_expire_lazy(self): self.mapper_registry.map_imperatively( User, users, - properties={"addresses": relationship(Address, backref="user")}, + properties={ + "addresses": relationship( + Address, backref="user", order_by=addresses.c.id + ) + }, ) self.mapper_registry.map_imperatively(Address, addresses) From 2df46ea04ff121ef99e1a9cbac705d3f937c3c15 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 4 Feb 2022 09:04:49 -0500 Subject: [PATCH 113/632] ensure exception raised for all stream w/ sync result Fixed issue where the :meth:`_asyncio.AsyncSession.execute` method failed to raise an informative exception if the ``stream_results`` execution option were used, which is incompatible with a sync-style :class:`_result.Result` object. 
An exception is now raised in this scenario in the same way one is already raised when using ``stream_results`` in conjunction with the :meth:`_asyncio.AsyncConnection.execute` method. Additionally, for improved stability with state-sensitive dialects such as asyncmy, the cursor is now closed when this error condition is raised; previously with the asyncmy dialect, the connection would go into an invalid state with unconsumed server side results remaining. Fixes: #7667 Change-Id: I6eb7affe08584889b57423a90258295f8b7085dc (cherry picked from commit faa9ef2cff53bde291df5ac3b5c4ed8f665ecd8c) --- doc/build/changelog/unreleased_14/7667.rst | 15 ++++++++++++ lib/sqlalchemy/engine/cursor.py | 1 + lib/sqlalchemy/engine/result.py | 1 + lib/sqlalchemy/ext/asyncio/engine.py | 19 +++------------ lib/sqlalchemy/ext/asyncio/result.py | 22 ++++++++++++++++++ lib/sqlalchemy/ext/asyncio/session.py | 4 +++- test/ext/asyncio/test_engine_py3k.py | 27 ++++++++++++++++++++++ test/ext/asyncio/test_session_py3k.py | 24 +++++++++++++++++++ 8 files changed, 96 insertions(+), 17 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7667.rst diff --git a/doc/build/changelog/unreleased_14/7667.rst b/doc/build/changelog/unreleased_14/7667.rst new file mode 100644 index 00000000000..d66572feb0f --- /dev/null +++ b/doc/build/changelog/unreleased_14/7667.rst @@ -0,0 +1,15 @@ +.. change:: + :tags: bug, asyncio + :tickets: 7667 + + Fixed issue where the :meth:`_asyncio.AsyncSession.execute` method failed + to raise an informative exception if the ``stream_results`` execution + option were used, which is incompatible with a sync-style + :class:`_result.Result` object. An exception is now raised in this scenario + in the same way one is already raised when using ``stream_results`` in + conjunction with the :meth:`_asyncio.AsyncConnection.execute` method. 
+ Additionally, for improved stability with state-sensitive dialects such as + asyncmy, the cursor is now closed when this error condition is raised; + previously with the asyncmy dialect, the connection would go into an + invalid state with unconsumed server side results remaining. + diff --git a/lib/sqlalchemy/engine/cursor.py b/lib/sqlalchemy/engine/cursor.py index 7e8c0d7c9f6..abe42c96ad8 100644 --- a/lib/sqlalchemy/engine/cursor.py +++ b/lib/sqlalchemy/engine/cursor.py @@ -1786,6 +1786,7 @@ class CursorResult(BaseCursorResult, Result): _cursor_metadata = CursorResultMetaData _cursor_strategy_cls = CursorFetchStrategy _no_result_metadata = _NO_RESULT_METADATA + _is_cursor = True def _fetchiter_impl(self): fetchone = self.cursor_strategy.fetchone diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py index 4264d6d8581..f8006ac53df 100644 --- a/lib/sqlalchemy/engine/result.py +++ b/lib/sqlalchemy/engine/result.py @@ -292,6 +292,7 @@ class ResultInternal(InPlaceGenerative): _generate_rows = True _unique_filter_state = None _post_creational_filter = None + _is_cursor = False @HasMemoized.memoized_attribute def _row_getter(self): diff --git a/lib/sqlalchemy/ext/asyncio/engine.py b/lib/sqlalchemy/ext/asyncio/engine.py index 0939395c18b..63d148c9757 100644 --- a/lib/sqlalchemy/ext/asyncio/engine.py +++ b/lib/sqlalchemy/ext/asyncio/engine.py @@ -7,6 +7,7 @@ from . import exc as async_exc from .base import ProxyComparable from .base import StartableContext +from .result import _ensure_sync_result from .result import AsyncResult from ... import exc from ... import inspection @@ -380,15 +381,8 @@ async def exec_driver_sql( execution_options, _require_await=True, ) - if result.context._is_server_side: - raise async_exc.AsyncMethodRequired( - "Can't use the connection.exec_driver_sql() method with a " - "server-side cursor." - "Use the connection.stream() method for an async " - "streaming result set." 
- ) - return result + return await _ensure_sync_result(result, self.exec_driver_sql) async def stream( self, @@ -461,14 +455,7 @@ async def execute( execution_options, _require_await=True, ) - if result.context._is_server_side: - raise async_exc.AsyncMethodRequired( - "Can't use the connection.execute() method with a " - "server-side cursor." - "Use the connection.stream() method for an async " - "streaming result set." - ) - return result + return await _ensure_sync_result(result, self.execute) async def scalar( self, diff --git a/lib/sqlalchemy/ext/asyncio/result.py b/lib/sqlalchemy/ext/asyncio/result.py index 81ef9915c52..62e4a9a0e54 100644 --- a/lib/sqlalchemy/ext/asyncio/result.py +++ b/lib/sqlalchemy/ext/asyncio/result.py @@ -7,6 +7,7 @@ import operator +from . import exc as async_exc from ...engine.result import _NO_ROW from ...engine.result import FilterResult from ...engine.result import FrozenResult @@ -646,3 +647,24 @@ async def one(self): """ return await greenlet_spawn(self._only_one_row, True, True, False) + + +async def _ensure_sync_result(result, calling_method): + if not result._is_cursor: + cursor_result = getattr(result, "raw", None) + else: + cursor_result = result + if cursor_result and cursor_result.context._is_server_side: + await greenlet_spawn(cursor_result.close) + raise async_exc.AsyncMethodRequired( + "Can't use the %s.%s() method with a " + "server-side cursor. " + "Use the %s.stream() method for an async " + "streaming result set." + % ( + calling_method.__self__.__class__.__name__, + calling_method.__name__, + calling_method.__self__.__class__.__name__, + ) + ) + return result diff --git a/lib/sqlalchemy/ext/asyncio/session.py b/lib/sqlalchemy/ext/asyncio/session.py index b685218d960..0b428d7fedd 100644 --- a/lib/sqlalchemy/ext/asyncio/session.py +++ b/lib/sqlalchemy/ext/asyncio/session.py @@ -8,6 +8,7 @@ from . 
import result as _result from .base import ReversibleProxy from .base import StartableContext +from .result import _ensure_sync_result from ... import util from ...orm import object_session from ...orm import Session @@ -208,7 +209,7 @@ async def execute( else: execution_options = _EXECUTE_OPTIONS - return await greenlet_spawn( + result = await greenlet_spawn( self.sync_session.execute, statement, params=params, @@ -216,6 +217,7 @@ async def execute( bind_arguments=bind_arguments, **kw ) + return await _ensure_sync_result(result, self.execute) async def scalar( self, diff --git a/test/ext/asyncio/test_engine_py3k.py b/test/ext/asyncio/test_engine_py3k.py index b302c96cbdb..84358f4ee4f 100644 --- a/test/ext/asyncio/test_engine_py3k.py +++ b/test/ext/asyncio/test_engine_py3k.py @@ -18,6 +18,7 @@ from sqlalchemy.ext.asyncio import async_engine_from_config from sqlalchemy.ext.asyncio import create_async_engine from sqlalchemy.ext.asyncio import engine as _async_engine +from sqlalchemy.ext.asyncio import exc as async_exc from sqlalchemy.ext.asyncio import exc as asyncio_exc from sqlalchemy.ext.asyncio.base import ReversibleProxy from sqlalchemy.ext.asyncio.engine import AsyncConnection @@ -719,6 +720,32 @@ async def test_inspect_connection(self, async_engine): class AsyncResultTest(EngineFixture): + @async_test + async def test_no_ss_cursor_w_execute(self, async_engine): + users = self.tables.users + async with async_engine.connect() as conn: + conn = await conn.execution_options(stream_results=True) + with expect_raises_message( + async_exc.AsyncMethodRequired, + r"Can't use the AsyncConnection.execute\(\) method with a " + r"server-side cursor. 
Use the AsyncConnection.stream\(\) " + r"method for an async streaming result set.", + ): + await conn.execute(select(users)) + + @async_test + async def test_no_ss_cursor_w_exec_driver_sql(self, async_engine): + async with async_engine.connect() as conn: + conn = await conn.execution_options(stream_results=True) + with expect_raises_message( + async_exc.AsyncMethodRequired, + r"Can't use the AsyncConnection.exec_driver_sql\(\) " + r"method with a " + r"server-side cursor. Use the AsyncConnection.stream\(\) " + r"method for an async streaming result set.", + ): + await conn.exec_driver_sql("SELECT * FROM users") + @testing.combinations( (None,), ("scalars",), ("mappings",), argnames="filter_" ) diff --git a/test/ext/asyncio/test_session_py3k.py b/test/ext/asyncio/test_session_py3k.py index bcaea05e53f..f04b87f3718 100644 --- a/test/ext/asyncio/test_session_py3k.py +++ b/test/ext/asyncio/test_session_py3k.py @@ -11,6 +11,7 @@ from sqlalchemy import update from sqlalchemy.ext.asyncio import async_object_session from sqlalchemy.ext.asyncio import AsyncSession +from sqlalchemy.ext.asyncio import exc as async_exc from sqlalchemy.ext.asyncio.base import ReversibleProxy from sqlalchemy.orm import relationship from sqlalchemy.orm import selectinload @@ -19,6 +20,7 @@ from sqlalchemy.testing import async_test from sqlalchemy.testing import engines from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import is_ from sqlalchemy.testing import is_true from sqlalchemy.testing import mock @@ -165,6 +167,28 @@ async def test_stream_partitions(self, async_session, kw): ], ) + @testing.combinations("statement", "execute", argnames="location") + @async_test + async def test_no_ss_cursor_w_execute(self, async_session, location): + User = self.classes.User + + stmt = select(User) + if location == "statement": + stmt = stmt.execution_options(stream_results=True) + + with expect_raises_message( + async_exc.AsyncMethodRequired, 
+ r"Can't use the AsyncSession.execute\(\) method with a " + r"server-side cursor. Use the AsyncSession.stream\(\) " + r"method for an async streaming result set.", + ): + if location == "execute": + await async_session.execute( + stmt, execution_options={"stream_results": True} + ) + else: + await async_session.execute(stmt) + class AsyncSessionTransactionTest(AsyncFixture): run_inserts = None From 1408b33b2caf1118861e9db51949ca1866a3d991 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 4 Feb 2022 15:46:41 -0500 Subject: [PATCH 114/632] amend verbiage for #7667's changelog message clarify this applies to async calling styles only. Change-Id: I42286fe2651be13bd472fac981df9de276ed9bb1 (cherry picked from commit 88bc4f2f66cffe82ed039a622027abd54897280a) --- doc/build/changelog/unreleased_14/7667.rst | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/doc/build/changelog/unreleased_14/7667.rst b/doc/build/changelog/unreleased_14/7667.rst index d66572feb0f..34dcd44b0ba 100644 --- a/doc/build/changelog/unreleased_14/7667.rst +++ b/doc/build/changelog/unreleased_14/7667.rst @@ -5,11 +5,14 @@ Fixed issue where the :meth:`_asyncio.AsyncSession.execute` method failed to raise an informative exception if the ``stream_results`` execution option were used, which is incompatible with a sync-style - :class:`_result.Result` object. An exception is now raised in this scenario - in the same way one is already raised when using ``stream_results`` in - conjunction with the :meth:`_asyncio.AsyncConnection.execute` method. - Additionally, for improved stability with state-sensitive dialects such as - asyncmy, the cursor is now closed when this error condition is raised; - previously with the asyncmy dialect, the connection would go into an - invalid state with unconsumed server side results remaining. + :class:`_result.Result` object when using an asyncio calling style, as the + operation to fetch more rows would need to be awaited. 
An exception is now + raised in this scenario in the same way one was already raised when the + ``stream_results`` option would be used with the + :meth:`_asyncio.AsyncConnection.execute` method. + + Additionally, for improved stability with state-sensitive database drivers + such as asyncmy, the cursor is now closed when this error condition is + raised; previously with the asyncmy dialect, the connection would go into + an invalid state with unconsumed server side results remaining. From 8babeb469374ecae9aa743abea844066ca5d066d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 8 Feb 2022 10:12:33 -0500 Subject: [PATCH 115/632] Accommodate escaped_bind_names for defaults/insert params Fixed issue in Oracle dialect where using a column name that requires quoting when written as a bound parameter, such as ``"_id"``, would not correctly track a Python generated default value due to the bound-parameter rewriting missing this value, causing an Oracle error to be raised. Fixes: #7676 Change-Id: I5a54426d24f2f9b336e3597d5595fb3e031aad97 (cherry picked from commit c2aa6374f3965c28aa2d56cbddf6dab3e1de18a2) --- doc/build/changelog/unreleased_14/7676.rst | 8 ++++++ lib/sqlalchemy/engine/default.py | 5 ++-- lib/sqlalchemy/sql/compiler.py | 30 +++++++++++++++++++--- test/dialect/oracle/test_dialect.py | 29 +++++++++++++++++++++ 4 files changed, 65 insertions(+), 7 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7676.rst diff --git a/doc/build/changelog/unreleased_14/7676.rst b/doc/build/changelog/unreleased_14/7676.rst new file mode 100644 index 00000000000..ec6275fb40c --- /dev/null +++ b/doc/build/changelog/unreleased_14/7676.rst @@ -0,0 +1,8 @@ +.. 
change:: + :tags: bug, oracle + :tickets: 7676 + + Fixed issue in Oracle dialect where using a column name that requires + quoting when written as a bound parameter, such as ``"_id"``, would not + correctly track a Python generated default value due to the bound-parameter + rewriting missing this value, causing an Oracle error to be raised. diff --git a/lib/sqlalchemy/engine/default.py b/lib/sqlalchemy/engine/default.py index fae13a19b1d..d35e5f821ae 100644 --- a/lib/sqlalchemy/engine/default.py +++ b/lib/sqlalchemy/engine/default.py @@ -1573,7 +1573,6 @@ def _setup_ins_pk_from_lastrowid(self): def _setup_ins_pk_from_empty(self): getter = self.compiled._inserted_primary_key_from_lastrowid_getter - return [getter(None, param) for param in self.compiled_parameters] def _setup_ins_pk_from_implicit_returning(self, result, rows): @@ -1842,7 +1841,7 @@ def get_update_default(self, column): return self._exec_default(column, column.onupdate, column.type) def _process_executemany_defaults(self): - key_getter = self.compiled._key_getters_for_crud_column[2] + key_getter = self.compiled._within_exec_param_key_getter scalar_defaults = {} @@ -1880,7 +1879,7 @@ def _process_executemany_defaults(self): del self.current_parameters def _process_executesingle_defaults(self): - key_getter = self.compiled._key_getters_for_crud_column[2] + key_getter = self.compiled._within_exec_param_key_getter self.current_parameters = ( compiled_parameters ) = self.compiled_parameters[0] diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 78db3cb2be6..ed290552709 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -1290,16 +1290,29 @@ def _create_result_map(self): self._result_columns ) + @util.memoized_property + def _within_exec_param_key_getter(self): + getter = self._key_getters_for_crud_column[2] + if self.escaped_bind_names: + + def _get(obj): + key = getter(obj) + return self.escaped_bind_names.get(key, key) + + return _get + else: 
+ return getter + @util.memoized_property @util.preload_module("sqlalchemy.engine.result") def _inserted_primary_key_from_lastrowid_getter(self): result = util.preloaded.engine_result - key_getter = self._key_getters_for_crud_column[2] + param_key_getter = self._within_exec_param_key_getter table = self.statement.table getters = [ - (operator.methodcaller("get", key_getter(col), None), col) + (operator.methodcaller("get", param_key_getter(col), None), col) for col in table.primary_key ] @@ -1315,6 +1328,12 @@ def _inserted_primary_key_from_lastrowid_getter(self): row_fn = result.result_tuple([col.key for col in table.primary_key]) def get(lastrowid, parameters): + """given cursor.lastrowid value and the parameters used for INSERT, + return a "row" that represents the primary key, either by + using the "lastrowid" or by extracting values from the parameters + that were sent along with the INSERT. + + """ if proc is not None: lastrowid = proc(lastrowid) @@ -1333,7 +1352,7 @@ def get(lastrowid, parameters): def _inserted_primary_key_from_returning_getter(self): result = util.preloaded.engine_result - key_getter = self._key_getters_for_crud_column[2] + param_key_getter = self._within_exec_param_key_getter table = self.statement.table ret = {col: idx for idx, col in enumerate(self.returning)} @@ -1341,7 +1360,10 @@ def _inserted_primary_key_from_returning_getter(self): getters = [ (operator.itemgetter(ret[col]), True) if col in ret - else (operator.methodcaller("get", key_getter(col), None), False) + else ( + operator.methodcaller("get", param_key_getter(col), None), + False, + ) for col in table.primary_key ] diff --git a/test/dialect/oracle/test_dialect.py b/test/dialect/oracle/test_dialect.py index 554e5f18b4b..acabfc8bb80 100644 --- a/test/dialect/oracle/test_dialect.py +++ b/test/dialect/oracle/test_dialect.py @@ -532,6 +532,35 @@ def test_expanding_quote_roundtrip(self, metadata, connection): dict(uid=[1, 2, 3]), ) + @testing.combinations(True, False, 
argnames="executemany") + def test_python_side_default(self, metadata, connection, executemany): + """test #7676""" + + ids = ["a", "b", "c"] + + def gen_id(): + return ids.pop(0) + + t = Table( + "has_id", + metadata, + Column("_id", String(50), default=gen_id, primary_key=True), + Column("_data", Integer), + ) + metadata.create_all(connection) + + if executemany: + result = connection.execute( + t.insert(), [{"_data": 27}, {"_data": 28}, {"_data": 29}] + ) + eq_( + connection.execute(t.select().order_by(t.c._id)).all(), + [("a", 27), ("b", 28), ("c", 29)], + ) + else: + result = connection.execute(t.insert(), {"_data": 27}) + eq_(result.inserted_primary_key, ("a",)) + class CompatFlagsTest(fixtures.TestBase, AssertsCompiledSQL): def _dialect(self, server_version, **kw): From d0c53282de4d5a224d786134d29ba6632ae1b3fe Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 9 Feb 2022 10:00:24 -0500 Subject: [PATCH 116/632] update Mypy plugin status The Mypy plugin is not maintainable long-term and will be replaced by new APIs that allow for typing to work inline without the need for plugins. Change-Id: Icc7a203df1d0b19bde2fd852719b7b7215774c58 References: #7535 (cherry picked from commit 491e8507fa46e4312302419149d180769c84696e) --- doc/build/orm/extensions/mypy.rst | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/doc/build/orm/extensions/mypy.rst b/doc/build/orm/extensions/mypy.rst index b710d1f4430..f5a22177fe3 100644 --- a/doc/build/orm/extensions/mypy.rst +++ b/doc/build/orm/extensions/mypy.rst @@ -6,13 +6,29 @@ Mypy / Pep-484 Support for ORM Mappings Support for :pep:`484` typing annotations as well as the `Mypy `_ type checking tool. +.. topic:: SQLAlchemy Mypy Plugin Status Update + + The SQLAlchemy Mypy plugin, while it has technically never left the + "alpha" stage, should **now be considered as legacy**. 
SQLAlchemy 2.0 + will allow for construction of declarative mappings in place which will + support proper typing directly, without the need for plugins. + + The Mypy plugin itself does not solve the issue of supplying correct typing + with other typing tools such as Pylance/Pyright, Pytype, Pycharm, etc, which + cannot make use of Mypy plugins. Additionally, Mypy plugins are extremely + difficult to develop, maintain and test, as a Mypy plugin must be deeply + integrated with Mypy's internal datastructures and processes, which itself + are not stable within the Mypy project itself. The SQLAlchemy Mypy plugin + has lots of limitations when used with code that deviates from very basic + patterns which are reported regularly. + + For these reasons, new non-regression issues reported against the Mypy + plugin are unlikely to be fixed; the plugin will be supplied with SQLAlchemy + 2.0 as well but will be legacy support only. SQLAlchemy 2.0 code that + makes use of upcoming declarative APIs, which are slightly adjusted from + the existing APIs, will enjoy full compliance with pep-484 as well as + working correctly within IDEs and other typing tools. -.. note:: The Mypy plugin and typing annotations should be regarded as - **alpha level** for the - early 1.4 releases of SQLAlchemy. The plugin has not been tested in real world - scenarios and may have many unhandled cases and error conditions. - Specifics of the new typing stubs are also **subject to change** during - the 1.4 series. Installation ------------ From f6f6fe264b62539ededb44b3b1c1f4b5804b7d51 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 9 Feb 2022 15:39:44 -0500 Subject: [PATCH 117/632] update zimports includes new fix for formatting like black does. also runs black on a few outliers. 
Change-Id: I67446660a6bc10b73eb710389ae6d3f122af9302 (cherry picked from commit 2579ed2b8f295c7e0ad3d875bf57535623f8df0d) --- .pre-commit-config.yaml | 4 +--- lib/sqlalchemy/testing/engines.py | 4 +++- pyproject.toml | 6 ++++++ test/ext/mypy/incremental/ticket_6435/enum_col_import2.py | 6 ++++-- 4 files changed, 14 insertions(+), 6 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ae35977b9d0..f8512708ee8 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -7,11 +7,9 @@ repos: - id: black - repo: https://github.com/sqlalchemyorg/zimports - rev: v0.4.0 + rev: v0.5.0 hooks: - id: zimports - args: - - --keep-unused-type-checking - repo: https://github.com/pycqa/flake8 rev: 3.9.2 diff --git a/lib/sqlalchemy/testing/engines.py b/lib/sqlalchemy/testing/engines.py index 97809f4b9c2..a92d476ac54 100644 --- a/lib/sqlalchemy/testing/engines.py +++ b/lib/sqlalchemy/testing/engines.py @@ -280,7 +280,9 @@ def testing_engine( """Produce an engine configured by --options with optional overrides.""" if asyncio: - from sqlalchemy.ext.asyncio import create_async_engine as create_engine + from sqlalchemy.ext.asyncio import ( + create_async_engine as create_engine, + ) elif future or ( config.db and config.db._is_future and future is not False ): diff --git a/pyproject.toml b/pyproject.toml index 0f72578923c..f82dbd468b3 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,9 @@ [tool.black] line-length = 79 target-version = ['py27', 'py36'] + +[tool.zimports] +black-line-length = 79 +keep-unused-type-checking = true + + diff --git a/test/ext/mypy/incremental/ticket_6435/enum_col_import2.py b/test/ext/mypy/incremental/ticket_6435/enum_col_import2.py index 4f29932e569..161dce08757 100644 --- a/test/ext/mypy/incremental/ticket_6435/enum_col_import2.py +++ b/test/ext/mypy/incremental/ticket_6435/enum_col_import2.py @@ -1,8 +1,10 @@ from sqlalchemy import Column from sqlalchemy import Enum -from sqlalchemy.orm import 
declarative_base, Mapped +from sqlalchemy.orm import declarative_base +from sqlalchemy.orm import Mapped from . import enum_col_import1 -from .enum_col_import1 import IntEnum, StrEnum +from .enum_col_import1 import IntEnum +from .enum_col_import1 import StrEnum Base = declarative_base() From 327d425c6355718f8c9d9c90270db1c81c868dea Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 9 Feb 2022 21:59:43 -0500 Subject: [PATCH 118/632] doc fixes * clarify merge behavior for non-present attributes, references #7687 * fix AsyncSession in async_scoped_session documentation, name the scoped session AsyncScopedSession, fixes: #7671 * Use non-deprecated execute() style in sqltypes JSON examples, fixes: #7633 * Add note regarding mitigation for https://github.com/MagicStack/asyncpg/issues/727, fixes #7245 Fixes: #7671 Fixes: #7633 Fixes: #7245 Change-Id: Ic40b4378ca321367a912864f4eddfdd9714fe217 (cherry picked from commit 449389a45f358300ba95f7d03c7b94b64703e31a) --- doc/build/orm/extensions/asyncio.rst | 10 +++---- doc/build/orm/session_state_management.rst | 13 ++++---- lib/sqlalchemy/dialects/postgresql/asyncpg.py | 19 ++++++++++++ lib/sqlalchemy/sql/sqltypes.py | 30 +++++++++++-------- 4 files changed, 49 insertions(+), 23 deletions(-) diff --git a/doc/build/orm/extensions/asyncio.rst b/doc/build/orm/extensions/asyncio.rst index 0851c529681..679d8f0564b 100644 --- a/doc/build/orm/extensions/asyncio.rst +++ b/doc/build/orm/extensions/asyncio.rst @@ -712,10 +712,10 @@ constructor:: from sqlalchemy.ext.asyncio import async_scoped_session from sqlalchemy.ext.asyncio import AsyncSession - async_session_factory = sessionmaker(some_async_engine, class_=_AsyncSession) - AsyncSession = async_scoped_session(async_session_factory, scopefunc=current_task) + async_session_factory = sessionmaker(some_async_engine, class_=AsyncSession) + AsyncScopedSession = async_scoped_session(async_session_factory, scopefunc=current_task) - some_async_session = AsyncSession() + some_async_session = 
AsyncScopedSession() :class:`_asyncio.async_scoped_session` also includes **proxy behavior** similar to that of :class:`.scoped_session`, which means it can be @@ -728,10 +728,10 @@ the usual ``await`` keywords are necessary, including for the some_async_session.add(some_object) # use the AsyncSession via the context-local proxy - await AsyncSession.commit() + await AsyncScopedSession.commit() # "remove" the current proxied AsyncSession for the local context - await AsyncSession.remove() + await AsyncScopedSession.remove() .. versionadded:: 1.4.19 diff --git a/doc/build/orm/session_state_management.rst b/doc/build/orm/session_state_management.rst index 64efffd7614..47b4fbe7fd8 100644 --- a/doc/build/orm/session_state_management.rst +++ b/doc/build/orm/session_state_management.rst @@ -204,11 +204,14 @@ When given an instance, it follows these steps: key if not located locally. * If the given instance has no primary key, or if no instance can be found with the primary key given, a new instance is created. -* The state of the given instance is then copied onto the located/newly - created instance. For attributes which are present on the source - instance, the value is transferred to the target instance. For mapped - attributes which aren't present on the source, the attribute is - expired on the target instance, discarding its existing value. +* The state of the given instance is then copied onto the located/newly created + instance. For attribute values which are present on the source instance, the + value is transferred to the target instance. For attribute values that aren't + present on the source instance, the corresponding attribute on the target + instance is :term:`expired` from memory, which discards any locally + present value from the target instance for that attribute, but no + direct modification is made to the database-persisted value for that + attribute. 
If the ``load=True`` flag is left at its default, this copy process emits events and will load the target object's diff --git a/lib/sqlalchemy/dialects/postgresql/asyncpg.py b/lib/sqlalchemy/dialects/postgresql/asyncpg.py index a8ce60d7006..305ad46a32b 100644 --- a/lib/sqlalchemy/dialects/postgresql/asyncpg.py +++ b/lib/sqlalchemy/dialects/postgresql/asyncpg.py @@ -96,6 +96,25 @@ stale, nor can it retry the statement as the PostgreSQL transaction is invalidated when these errors occur. +Disabling the PostgreSQL JIT to improve ENUM datatype handling +--------------------------------------------------------------- + +Asyncpg has an `issue `_ when +using PostgreSQL ENUM datatypes, where upon the creation of new database +connections, an expensive query may be emitted in order to retrieve metadata +regarding custom types which has been shown to negatively affect performance. +To mitigate this issue, the PostgreSQL "jit" setting may be disabled from the +client using this setting passed to :func:`_asyncio.create_async_engine`:: + + engine = create_async_engine( + "postgresql+asyncpg://user:password@localhost/tmp", + connect_args={"server_settings": {"jit": "off"}}, + ) + +.. seealso:: + + https://github.com/MagicStack/asyncpg/issues/727 + """ # noqa import collections diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index b53197b05bc..69cb858e509 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ -2162,7 +2162,7 @@ class JSON(Indexable, TypeEngine): with engine.connect() as conn: conn.execute( data_table.insert(), - data = {"key1": "value1", "key2": "value2"} + {"data": {"key1": "value1", "key2": "value2"}} ) **JSON-Specific Expression Operators** @@ -2258,20 +2258,22 @@ class JSON(Indexable, TypeEngine): **Support for JSON null vs. 
SQL NULL** - When working with NULL values, the :class:`_types.JSON` - type recommends the + When working with NULL values, the :class:`_types.JSON` type recommends the use of two specific constants in order to differentiate between a column - that evaluates to SQL NULL, e.g. no value, vs. the JSON-encoded string - of ``"null"``. To insert or select against a value that is SQL NULL, - use the constant :func:`.null`:: + that evaluates to SQL NULL, e.g. no value, vs. the JSON-encoded string of + ``"null"``. To insert or select against a value that is SQL NULL, use the + constant :func:`.null`. This symbol may be passed as a parameter value + specifically when using the :class:`_types.JSON` datatype, which contains + special logic that interprets this symbol to mean that the column value + should be SQL NULL as opposed to JSON ``"null"``:: from sqlalchemy import null - conn.execute(table.insert(), json_value=null()) + conn.execute(table.insert(), {"json_value": null()}) To insert or select against a value that is JSON ``"null"``, use the constant :attr:`_types.JSON.NULL`:: - conn.execute(table.insert(), json_value=JSON.NULL) + conn.execute(table.insert(), {"json_value": JSON.NULL}) The :class:`_types.JSON` type supports a flag :paramref:`_types.JSON.none_as_null` which when set to True will result @@ -2372,12 +2374,14 @@ def __init__(self, none_as_null=False): """Construct a :class:`_types.JSON` type. :param none_as_null=False: if True, persist the value ``None`` as a - SQL NULL value, not the JSON encoding of ``null``. Note that - when this flag is False, the :func:`.null` construct can still - be used to persist a NULL value:: + SQL NULL value, not the JSON encoding of ``null``. 
Note that when this + flag is False, the :func:`.null` construct can still be used to + persist a NULL value, which may be passed directly as a parameter + value that is specially interpreted by the :class:`_types.JSON` type + as SQL NULL:: from sqlalchemy import null - conn.execute(table.insert(), data=null()) + conn.execute(table.insert(), {"data": null()}) .. note:: @@ -2703,7 +2707,7 @@ class ARRAY(SchemaEventTarget, Indexable, Concatenable, TypeEngine): connection.execute( mytable.insert(), - data=[1,2,3] + {"data": [1,2,3]} ) The :class:`_types.ARRAY` type can be constructed given a fixed number From 30089288d4392e290979a8e8067366688c16fda1 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 12 Feb 2022 10:50:45 -0500 Subject: [PATCH 119/632] Raise and re-catch NameError from _ModNS Fixed issue where using a fully qualified path for the classname in :func:`_orm.relationship` that nonetheless contained an incorrect name for path tokens that were not the first token, would fail to raise an informative error and would instead fail randomly at a later step. Fixes: #7697 Change-Id: I5e1a3aa4c2a6ea5b123be14666f589aec43f4b60 (cherry picked from commit 260ade78a70d51378de9e7b9456bfe6218859b6c) --- doc/build/changelog/unreleased_14/7697.rst | 8 ++++ lib/sqlalchemy/orm/clsregistry.py | 2 +- test/orm/declarative/test_clsregistry.py | 43 ++++++++++++++++++++-- 3 files changed, 48 insertions(+), 5 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7697.rst diff --git a/doc/build/changelog/unreleased_14/7697.rst b/doc/build/changelog/unreleased_14/7697.rst new file mode 100644 index 00000000000..03b318cce2a --- /dev/null +++ b/doc/build/changelog/unreleased_14/7697.rst @@ -0,0 +1,8 @@ +.. 
change:: + :tags: bug, orm + :tickets: 7697 + + Fixed issue where using a fully qualified path for the classname in + :func:`_orm.relationship` that nonetheless contained an incorrect name for + path tokens that were not the first token, would fail to raise an + informative error and would instead fail randomly at a later step. diff --git a/lib/sqlalchemy/orm/clsregistry.py b/lib/sqlalchemy/orm/clsregistry.py index 104d7c306f0..2c21498d851 100644 --- a/lib/sqlalchemy/orm/clsregistry.py +++ b/lib/sqlalchemy/orm/clsregistry.py @@ -257,7 +257,7 @@ def __getattr__(self, key): else: assert isinstance(value, _MultipleClassMarker) return value.attempt_get(self.__parent.path, key) - raise AttributeError( + raise NameError( "Module %r has no mapped classes " "registered under the name %r" % (self.__parent.name, key) ) diff --git a/test/orm/declarative/test_clsregistry.py b/test/orm/declarative/test_clsregistry.py index b77a101e8e1..17e3624b7bb 100644 --- a/test/orm/declarative/test_clsregistry.py +++ b/test/orm/declarative/test_clsregistry.py @@ -1,9 +1,14 @@ +from sqlalchemy import Column from sqlalchemy import exc +from sqlalchemy import Integer from sqlalchemy import MetaData +from sqlalchemy import testing from sqlalchemy.orm import clsregistry from sqlalchemy.orm import registry +from sqlalchemy.orm import relationship from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import mock @@ -108,6 +113,36 @@ def test_fragment_ambiguous(self): name_resolver("alt.Foo"), ) + @testing.combinations( + ("NonExistentFoo",), + ("nonexistent.Foo",), + ("existent.nonexistent.Foo",), + ("existent.NonExistentFoo",), + ("nonexistent.NonExistentFoo",), + ("existent.existent.NonExistentFoo",), + argnames="name", + ) + def test_name_resolution_failures(self, name, registry): + + Base = 
registry.generate_base() + + f1 = MockClass(registry, "existent.Foo") + f2 = MockClass(registry, "existent.existent.Foo") + clsregistry.add_class("Foo", f1, registry._class_registry) + clsregistry.add_class("Foo", f2, registry._class_registry) + + class MyClass(Base): + __tablename__ = "my_table" + id = Column(Integer, primary_key=True) + foo = relationship(name) + + with expect_raises_message( + exc.InvalidRequestError, + r"When initializing mapper .*MyClass.*, expression '%s' " + r"failed to locate a name" % (name,), + ): + registry.configure() + def test_no_fns_in_name_resolve(self): base = registry() f1 = MockClass(base, "foo.bar.Foo") @@ -241,7 +276,7 @@ def test_module_reg_cleanout_race(self): f_resolver = resolver("foo") del mod_entry.contents["Foo"] assert_raises_message( - AttributeError, + NameError, "Module 'bar' has no mapped classes registered " "under the name 'Foo'", lambda: f_resolver().bar.Foo, @@ -249,7 +284,7 @@ def test_module_reg_cleanout_race(self): f_resolver = name_resolver("foo") assert_raises_message( - AttributeError, + NameError, "Module 'bar' has no mapped classes registered " "under the name 'Foo'", lambda: f_resolver().bar.Foo, @@ -264,7 +299,7 @@ def test_module_reg_no_class(self): name_resolver, resolver = clsregistry._resolver(f1, MockProp()) f_resolver = resolver("foo") assert_raises_message( - AttributeError, + NameError, "Module 'bar' has no mapped classes registered " "under the name 'Bat'", lambda: f_resolver().bar.Bat, @@ -272,7 +307,7 @@ def test_module_reg_no_class(self): f_resolver = name_resolver("foo") assert_raises_message( - AttributeError, + NameError, "Module 'bar' has no mapped classes registered " "under the name 'Bat'", lambda: f_resolver().bar.Bat, From 84ef5f5b061579574360b1044e1d8a9d89059a9b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 15 Feb 2022 08:37:44 -0500 Subject: [PATCH 120/632] add note reiterating do_orm_execute() is for ORM statements only Fixes: #7707 Change-Id: 
I7216f63d9f4269059438e41ad8dd3dffc25a5d03 (cherry picked from commit 619abb52b6f1ee023db0f85fd96ba9f88c8efa7b) --- lib/sqlalchemy/orm/events.py | 36 ++++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/lib/sqlalchemy/orm/events.py b/lib/sqlalchemy/orm/events.py index 9a6acbd4225..2bef6394d42 100644 --- a/lib/sqlalchemy/orm/events.py +++ b/lib/sqlalchemy/orm/events.py @@ -1351,7 +1351,7 @@ def my_before_commit(session): """ - _target_class_doc = "SomeSessionOrFactory" + _target_class_doc = "SomeSessionClassOrObject" _dispatch_target = Session @@ -1419,15 +1419,31 @@ def wrap(session, state, *arg, **kw): event_key.base_listen(**kw) def do_orm_execute(self, orm_execute_state): - """Intercept statement executions that occur in terms of a :class:`.Session`. - - This event is invoked for all top-level SQL statements invoked - from the :meth:`_orm.Session.execute` method. As of SQLAlchemy 1.4, - all ORM queries emitted on behalf of a :class:`_orm.Session` will - flow through this method, so this event hook provides the single - point at which ORM queries of all types may be intercepted before - they are invoked, and additionally to replace their execution with - a different process. + """Intercept statement executions that occur on behalf of an + ORM :class:`.Session` object. + + This event is invoked for all top-level SQL statements invoked from the + :meth:`_orm.Session.execute` method, as well as related methods such as + :meth:`_orm.Session.scalars` and :meth:`_orm.Session.scalar`. As of + SQLAlchemy 1.4, all ORM queries emitted on behalf of a + :class:`_orm.Session` will flow through this method, so this event hook + provides the single point at which ORM queries of all types may be + intercepted before they are invoked, and additionally to replace their + execution with a different process. + + .. 
note:: The :meth:`_orm.SessionEvents.do_orm_execute` event hook + is triggered **for ORM statement executions only**, meaning those + invoked via the :meth:`_orm.Session.execute` and similar methods on + the :class:`_orm.Session` object. It does **not** trigger for + statements that are invoked by SQLAlchemy Core only, i.e. statements + invoked directly using :meth:`_engine.Connection.execute` or + otherwise originating from an :class:`_engine.Engine` object without + any :class:`_orm.Session` involved. To intercept **all** SQL + executions regardless of whether the Core or ORM APIs are in use, + see the event hooks at + :class:`.ConnectionEvents`, such as + :meth:`.ConnectionEvents.before_execute` and + :meth:`.ConnectionEvents.before_cursor_execute`. This event is a ``do_`` event, meaning it has the capability to replace the operation that the :meth:`_orm.Session.execute` method normally From 38fee68f476c6cd7b8347bfb15e3d88a00d0a952 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 15 Feb 2022 10:53:07 -0500 Subject: [PATCH 121/632] correct for non-deterministic gc artifacts Observed the tests here have different profiling counts when run individually vs. as a group, and this seems to be due to whether or not results of each query are garbage collected or not. for all but one test, ensuring results stay between query runs seems to meet the current profiling counts. 
Change-Id: I5aca5db08936757ad2a6055c5fc077cc58979bdd (cherry picked from commit bcd4e36ab466e8e300bd704dd3b7e6e8470a2972) --- test/aaa_profiling/test_orm.py | 36 +++- test/profiles.txt | 345 +++++++++++++++++++++------------ 2 files changed, 253 insertions(+), 128 deletions(-) diff --git a/test/aaa_profiling/test_orm.py b/test/aaa_profiling/test_orm.py index 5d081b933ec..65b1547a183 100644 --- a/test/aaa_profiling/test_orm.py +++ b/test/aaa_profiling/test_orm.py @@ -1225,7 +1225,9 @@ def test_no_bundle(self): @profiling.function_call_count(warmup=1) def go(): for i in range(100): - q.all() + # test counts assume objects remain in the session + # from previous run + r = q.all() # noqa F841 go() @@ -1239,7 +1241,9 @@ def test_no_entity_wo_annotations(self): @profiling.function_call_count(warmup=1) def go(): for i in range(100): - q.all() + # test counts assume objects remain in the session + # from previous run + r = q.all() # noqa F841 go() @@ -1251,7 +1255,9 @@ def test_no_entity_w_annotations(self): @profiling.function_call_count(warmup=1) def go(): for i in range(100): - q.all() + # test counts assume objects remain in the session + # from previous run + r = q.all() # noqa F841 go() @@ -1263,7 +1269,9 @@ def test_entity_w_annotations(self): @profiling.function_call_count(warmup=1) def go(): for i in range(100): - q.all() + # test counts assume objects remain in the session + # from previous run + r = q.all() # noqa F841 go() @@ -1276,7 +1284,9 @@ def test_entity_wo_annotations(self): @profiling.function_call_count(warmup=1) def go(): for i in range(100): - q.all() + # test counts assume objects remain in the session + # from previous run + r = q.all() # noqa F841 go() @@ -1289,7 +1299,9 @@ def test_no_bundle_wo_annotations(self): @profiling.function_call_count(warmup=1) def go(): for i in range(100): - q.all() + # test counts assume objects remain in the session + # from previous run + r = q.all() # noqa F841 go() @@ -1301,7 +1313,9 @@ def 
test_no_bundle_w_annotations(self): @profiling.function_call_count(warmup=1) def go(): for i in range(100): - q.all() + # test counts assume objects remain in the session + # from previous run + r = q.all() # noqa F841 go() @@ -1314,7 +1328,9 @@ def test_bundle_wo_annotation(self): @profiling.function_call_count(warmup=1) def go(): for i in range(100): - q.all() + # test counts assume objects remain in the session + # from previous run + r = q.all() # noqa F841 go() @@ -1326,6 +1342,8 @@ def test_bundle_w_annotation(self): @profiling.function_call_count(warmup=1) def go(): for i in range(100): - q.all() + # test counts assume objects remain in the session + # from previous run + r = q.all() # noqa F841 go() diff --git a/test/profiles.txt b/test/profiles.txt index b99be234662..ab19468b37d 100644 --- a/test/profiles.txt +++ b/test/profiles.txt @@ -27,8 +27,10 @@ test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_2. test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_2.7_oracle_cx_oracle_dbapiunicode_nocextensions 66 test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_cextensions 68 test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 68 -test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 68 -test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 68 +test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 67 +test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 67 +test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 72 
+test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 72 test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 73 test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 73 test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 73 @@ -37,8 +39,8 @@ test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3. test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.9_mssql_pyodbc_dbapiunicode_nocextensions 73 test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_cextensions 73 test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_nocextensions 73 -test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 73 -test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 73 +test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 72 +test.aaa_profiling.test_compiler.CompileTest.test_insert x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 72 # TEST: test.aaa_profiling.test_compiler.CompileTest.test_select @@ -50,8 +52,10 @@ test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_2. 
test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_2.7_mssql_pyodbc_dbapiunicode_nocextensions 181 test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_cextensions 181 test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 181 -test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 181 -test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 181 +test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 180 +test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 180 +test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 195 +test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 195 test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 196 test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 196 test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 196 @@ -60,12 +64,15 @@ test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3. 
test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.9_mssql_pyodbc_dbapiunicode_nocextensions 196 test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_cextensions 196 test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_nocextensions 196 -test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 196 -test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 196 +test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 195 +test.aaa_profiling.test_compiler.CompileTest.test_select x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 195 # TEST: test.aaa_profiling.test_compiler.CompileTest.test_select_labels -test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 205 +test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 204 +test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 204 +test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 219 +test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 219 test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 212 test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 212 test.aaa_profiling.test_compiler.CompileTest.test_select_labels 
x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 212 @@ -74,8 +81,8 @@ test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpy test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_3.9_mssql_pyodbc_dbapiunicode_nocextensions 212 test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_cextensions 212 test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_nocextensions 212 -test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 212 -test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 212 +test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 219 +test.aaa_profiling.test_compiler.CompileTest.test_select_labels x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 219 # TEST: test.aaa_profiling.test_compiler.CompileTest.test_update @@ -93,8 +100,10 @@ test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_2. 
test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_2.7_oracle_cx_oracle_dbapiunicode_nocextensions 79 test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_cextensions 79 test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 79 -test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 79 -test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 79 +test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 78 +test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 78 +test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 81 +test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 81 test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 82 test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 82 test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 82 @@ -103,8 +112,8 @@ test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3. 
test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.9_mssql_pyodbc_dbapiunicode_nocextensions 82 test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_cextensions 82 test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_nocextensions 82 -test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 82 -test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 82 +test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 81 +test.aaa_profiling.test_compiler.CompileTest.test_update x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 81 # TEST: test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause @@ -118,6 +127,8 @@ test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause x86_64_linu test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 169 test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 169 test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 169 +test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 175 +test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 175 test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 175 test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause 
x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 175 test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 175 @@ -131,11 +142,15 @@ test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause x86_64_linu # TEST: test.aaa_profiling.test_misc.CacheKeyTest.test_statement_key_is_cached +test.aaa_profiling.test_misc.CacheKeyTest.test_statement_key_is_cached x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 303 +test.aaa_profiling.test_misc.CacheKeyTest.test_statement_key_is_cached x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 303 test.aaa_profiling.test_misc.CacheKeyTest.test_statement_key_is_cached x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 303 test.aaa_profiling.test_misc.CacheKeyTest.test_statement_key_is_cached x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 303 # TEST: test.aaa_profiling.test_misc.CacheKeyTest.test_statement_key_is_not_cached +test.aaa_profiling.test_misc.CacheKeyTest.test_statement_key_is_not_cached x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 5003 +test.aaa_profiling.test_misc.CacheKeyTest.test_statement_key_is_not_cached x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 5003 test.aaa_profiling.test_misc.CacheKeyTest.test_statement_key_is_not_cached x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 5403 test.aaa_profiling.test_misc.CacheKeyTest.test_statement_key_is_not_cached x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 5403 @@ -143,76 +158,98 @@ test.aaa_profiling.test_misc.CacheKeyTest.test_statement_key_is_not_cached x86_6 test.aaa_profiling.test_misc.EnumTest.test_create_enum_from_pep_435_w_expensive_members x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 1328 test.aaa_profiling.test_misc.EnumTest.test_create_enum_from_pep_435_w_expensive_members 
x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 1328 +test.aaa_profiling.test_misc.EnumTest.test_create_enum_from_pep_435_w_expensive_members x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 929 +test.aaa_profiling.test_misc.EnumTest.test_create_enum_from_pep_435_w_expensive_members x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 929 test.aaa_profiling.test_misc.EnumTest.test_create_enum_from_pep_435_w_expensive_members x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 929 test.aaa_profiling.test_misc.EnumTest.test_create_enum_from_pep_435_w_expensive_members x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 929 # TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 49105 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 60305 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 52805 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 64905 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 46635 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 56845 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 50335 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 61445 
+test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 50335 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 61445 # TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 47805 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 59005 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 51505 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 63605 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 45735 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 55945 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 49435 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 60545 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 49435 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 60545 # TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations 
-test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 51705 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 60405 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 54805 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 64405 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 50835 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 58545 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 53935 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 62545 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 53935 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 62545 # TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 50805 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 59505 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations 
x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 53905 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 63505 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 49935 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 57645 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 53035 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 61645 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 53035 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 61645 # TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 45205 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 48905 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 47705 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 52305 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 43035 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle 
x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 45745 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 45535 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 49145 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 45535 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 49145 # TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 47305 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 56005 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 50405 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 60005 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 45235 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 52945 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 48335 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 56945 
+test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 48335 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 56945 # TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 46405 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 55105 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 49505 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 59105 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 44335 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 52045 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 47435 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 56045 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 47435 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 56045 # TEST: 
test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 30905 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 33505 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 33705 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 36605 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 31805 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 34405 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 34605 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 37505 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 34605 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 37505 # TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 30005 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations 
x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 32605 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 32805 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 35705 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 30905 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 33505 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 33705 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 36605 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 33705 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 36605 # TEST: test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 3358 test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 3358 +test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 3479 +test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 3479 
test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 3479 test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 3479 @@ -220,6 +257,8 @@ test.aaa_profiling.test_orm.AttributeOverheadTest.test_attribute_set x86_64_linu test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 5327 test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 5327 +test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 5529 +test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 5529 test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 5529 test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 5529 @@ -227,34 +266,44 @@ test.aaa_profiling.test_orm.AttributeOverheadTest.test_collection_append_remove test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_key_bound_branching x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 68 test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_key_bound_branching x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 68 +test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_key_bound_branching x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 73 +test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_key_bound_branching 
x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 73 test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_key_bound_branching x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 73 test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_key_bound_branching x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 73 # TEST: test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching -test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 387 -test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 387 -test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 392 -test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 392 +test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 383 +test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 383 +test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 388 +test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 388 +test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 388 +test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 388 
# TEST: test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline -test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 15236 -test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 26249 -test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 15264 -test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 27281 +test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 15162 +test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 26175 +test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 15190 +test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 27207 +test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 15190 +test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 27207 # TEST: test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols -test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 21341 -test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 26354 -test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 21382 -test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols 
x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 27399 +test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 21303 +test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 26316 +test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 21344 +test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 27361 +test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 21344 +test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 27361 # TEST: test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 9853 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 10003 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 10304 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 10454 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 10304 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 10454 @@ -262,6 +311,8 @@ test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_c 
test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased_select_join x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 1103 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased_select_join x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 1103 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased_select_join x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 1104 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased_select_join x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 1104 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased_select_join x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 1104 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased_select_join x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 1104 @@ -269,97 +320,125 @@ test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased_select_join x8 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 4053 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 4203 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 4054 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 4204 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 4054 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 4204 # TEST: test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d 
x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 95938 -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 96088 -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 103539 -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 103689 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 96088 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 96238 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 103689 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 103839 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 103689 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 103839 # TEST: test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 93988 -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 94138 -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 101889 -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 102039 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased 
x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 94138 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 94288 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 102039 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 102189 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 102039 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 102189 # TEST: test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query -test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 496829 -test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 498671 -test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 528695 -test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 530537 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 495703 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 497535 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 527563 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 529405 
+test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 527563 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 529405 # TEST: test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results -test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 425805 -test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 443405 -test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 431505 -test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 450605 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 425705 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 443305 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 430805 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 449905 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 430205 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 450505 # TEST: test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity 
x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 21984 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 21984 +test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 22984 +test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 22984 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 22984 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_identity x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 22984 # TEST: test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity -test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 102029 -test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 106786 -test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 106348 -test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 112356 +test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 103486 +test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 108243 
+test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 107759 +test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 113767 +test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 107759 +test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 113767 # TEST: test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks -test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 19799 -test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 20301 -test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 20739 -test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 21307 +test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 19841 +test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 20287 +test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 20731 +test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 21299 
+test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 20731 +test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 21299 # TEST: test.aaa_profiling.test_orm.MergeTest.test_merge_load -test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 1339 -test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 1373 -test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 1399 -test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 1444 +test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 1396 +test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 1436 +test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 1460 +test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 1511 +test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 1460 +test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 1511 # TEST: test.aaa_profiling.test_orm.MergeTest.test_merge_no_load test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 101,17 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 101,17 
+test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 103,18 +test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 103,18 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 103,18 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 103,18 # TEST: test.aaa_profiling.test_orm.QueryTest.test_query_cols -test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 5844 -test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 6604 -test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 6152 -test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 6942 +test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 5842 +test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 6602 +test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 6150 +test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 6940 +test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 6150 +test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 6940 # TEST: test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results 
-test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 251605 -test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 269605 -test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 262205 -test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 281705 +test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 253005 +test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 271105 +test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 263605 +test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 283105 +test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 263605 +test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 283105 # TEST: test.aaa_profiling.test_orm.SessionTest.test_expire_lots -test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 1149 -test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 1144 -test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 1269 
-test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 1258 +test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 1141 +test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 1146 +test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 1252 +test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 1256 +test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 1255 +test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 1264 # TEST: test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 90 test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 90 +test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 74 +test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 74 test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 74 test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 74 @@ -367,6 +446,8 @@ test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect x86_64_linux_cpyth test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect 
x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 33 test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 33 +test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 24 +test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 24 test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 24 test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 24 @@ -388,6 +469,8 @@ test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 53 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 51 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 53 +test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 55 +test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 55 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 55 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 55 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_connection_execute 
x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 55 @@ -417,6 +500,8 @@ test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute x86_ test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 94 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 92 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 94 +test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 94 +test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 94 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 94 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 94 test.aaa_profiling.test_resultset.ExecutionTest.test_minimal_engine_execute x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 94 @@ -446,6 +531,8 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile x86 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 16 +test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile 
x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 17 +test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 17 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 17 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 17 test.aaa_profiling.test_resultset.ResultSetTest.test_contains_doesnt_compile x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 17 @@ -473,8 +560,10 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_ test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_2.7_oracle_cx_oracle_dbapiunicode_nocextensions 43564 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_cextensions 1551 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 13553 -test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 1489 -test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 13508 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 1488 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 13490 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 1507 
+test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 13511 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 1584 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 13588 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 88324 @@ -483,8 +572,8 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_ test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.9_mssql_pyodbc_dbapiunicode_nocextensions 13581 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_cextensions 1572 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_nocextensions 13576 -test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 1525 -test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 13529 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 1507 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_legacy x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 13511 # TEST: test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings @@ -502,8 +591,10 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_6 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings 
x86_64_linux_cpython_2.7_oracle_cx_oracle_dbapiunicode_nocextensions 45571 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_cextensions 2554 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 15556 -test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 2492 -test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 15511 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 2491 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 15493 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 2511 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 15515 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 2588 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 15592 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 89328 @@ -512,8 +603,8 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_6 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.9_mssql_pyodbc_dbapiunicode_nocextensions 15585 
test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_cextensions 2576 test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_nocextensions 15580 -test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 2529 -test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 15533 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 2511 +test.aaa_profiling.test_resultset.ResultSetTest.test_fetch_by_key_mappings x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 15515 # TEST: test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] @@ -533,6 +624,8 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 14 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 14 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 14 +test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 15 +test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 15 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 23 
test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 23 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-0] x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 20 @@ -562,6 +655,8 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 14 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 16 +test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 15 +test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 17 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 23 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 25 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-1] x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 20 @@ -591,6 +686,8 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 16 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 14 
test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 16 +test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 15 +test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 17 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 23 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 25 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[False-2] x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 20 @@ -620,6 +717,8 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_ test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 19 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 17 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 19 +test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 18 +test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 20 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 28 test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 30 
test.aaa_profiling.test_resultset.ResultSetTest.test_one_or_none[True-1] x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 25 @@ -649,6 +748,8 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_raw_string x86_64_linux_cpy test.aaa_profiling.test_resultset.ResultSetTest.test_raw_string x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 6283 test.aaa_profiling.test_resultset.ResultSetTest.test_raw_string x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 233 test.aaa_profiling.test_resultset.ResultSetTest.test_raw_string x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 6253 +test.aaa_profiling.test_resultset.ResultSetTest.test_raw_string x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 227 +test.aaa_profiling.test_resultset.ResultSetTest.test_raw_string x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 6227 test.aaa_profiling.test_resultset.ResultSetTest.test_raw_string x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 269 test.aaa_profiling.test_resultset.ResultSetTest.test_raw_string x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 6269 test.aaa_profiling.test_resultset.ResultSetTest.test_raw_string x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 87009 @@ -678,6 +779,8 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_raw_unicode x86_64_linux_cp test.aaa_profiling.test_resultset.ResultSetTest.test_raw_unicode x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 6283 test.aaa_profiling.test_resultset.ResultSetTest.test_raw_unicode x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 233 test.aaa_profiling.test_resultset.ResultSetTest.test_raw_unicode x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 6253 +test.aaa_profiling.test_resultset.ResultSetTest.test_raw_unicode x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 227 
+test.aaa_profiling.test_resultset.ResultSetTest.test_raw_unicode x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 6227 test.aaa_profiling.test_resultset.ResultSetTest.test_raw_unicode x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 269 test.aaa_profiling.test_resultset.ResultSetTest.test_raw_unicode x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 6269 test.aaa_profiling.test_resultset.ResultSetTest.test_raw_unicode x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 87009 @@ -705,8 +808,10 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_2.7_oracle_cx_oracle_dbapiunicode_nocextensions 36570 test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_cextensions 549 test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 6551 -test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 487 -test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 6506 +test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 486 +test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 6488 +test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 506 +test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 6510 test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 583 
test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 6587 test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 87323 @@ -715,8 +820,8 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.9_mssql_pyodbc_dbapiunicode_nocextensions 6580 test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_cextensions 571 test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_nocextensions 6575 -test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 524 -test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 6528 +test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 506 +test.aaa_profiling.test_resultset.ResultSetTest.test_string x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 6510 # TEST: test.aaa_profiling.test_resultset.ResultSetTest.test_unicode @@ -734,8 +839,10 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpytho test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_2.7_oracle_cx_oracle_dbapiunicode_nocextensions 36570 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_cextensions 549 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_2.7_postgresql_psycopg2_dbapiunicode_nocextensions 6551 -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 
x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 487 -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 6506 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 486 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 6488 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 506 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 6510 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_cextensions 583 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_3.9_mariadb_mysqldb_dbapiunicode_nocextensions 6587 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_3.9_mariadb_pymysql_dbapiunicode_cextensions 87323 @@ -744,5 +851,5 @@ test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpytho test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_3.9_mssql_pyodbc_dbapiunicode_nocextensions 6580 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_cextensions 571 test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_3.9_postgresql_psycopg2_dbapiunicode_nocextensions 6575 -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 524 -test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 6528 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode 
x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 506 +test.aaa_profiling.test_resultset.ResultSetTest.test_unicode x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_nocextensions 6510 From c98c9d7133d6a3eb1b92a95a565d704bcbbcaa4e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 17 Feb 2022 15:13:41 -0500 Subject: [PATCH 122/632] clarify SQLAlchemy version 2.0 constructs are not yet available Fixes: #7726 Change-Id: I30646b9da5d4de6a075dedb4b42fd00b3ed7d969 (cherry picked from commit 86e42fbc83c78a577c1b7079f0aae7cbac5b29d5) (cherry picked from commit c5756b11a0eceb6ba4c868b52daf676652940835) --- doc/build/orm/extensions/mypy.rst | 40 ++++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 9 deletions(-) diff --git a/doc/build/orm/extensions/mypy.rst b/doc/build/orm/extensions/mypy.rst index f5a22177fe3..aa0a7f32d8a 100644 --- a/doc/build/orm/extensions/mypy.rst +++ b/doc/build/orm/extensions/mypy.rst @@ -8,10 +8,16 @@ Support for :pep:`484` typing annotations as well as the .. topic:: SQLAlchemy Mypy Plugin Status Update - The SQLAlchemy Mypy plugin, while it has technically never left the - "alpha" stage, should **now be considered as legacy**. SQLAlchemy 2.0 - will allow for construction of declarative mappings in place which will - support proper typing directly, without the need for plugins. + **Updated February 17, 2022** + + The SQLAlchemy Mypy plugin, while it has technically never left the "alpha" + stage, should **now be considered as legacy, even though it is still + necessary for full Mypy support when using SQLAlchemy 1.4**. SQLAlchemy + version 2.0, when released, will include new constructs that will allow for + construction of declarative mappings in place which will support proper + typing directly, without the need for plugins. This new feature is **not + part of SQLAlchemy 1.4, it is only in SQLAlchemy 2.0, which is not released + yet as of Feb 17, 2022**. 
The Mypy plugin itself does not solve the issue of supplying correct typing with other typing tools such as Pylance/Pyright, Pytype, Pycharm, etc, which @@ -23,11 +29,27 @@ Support for :pep:`484` typing annotations as well as the patterns which are reported regularly. For these reasons, new non-regression issues reported against the Mypy - plugin are unlikely to be fixed; the plugin will be supplied with SQLAlchemy - 2.0 as well but will be legacy support only. SQLAlchemy 2.0 code that - makes use of upcoming declarative APIs, which are slightly adjusted from - the existing APIs, will enjoy full compliance with pep-484 as well as - working correctly within IDEs and other typing tools. + plugin are unlikely to be fixed. When SQLAlchemy 2.0 is released, it will + continue to include the plugin, which will have been updated to continue to + function as well as it does in SQLAlchemy 1.4, when running under SQLAlchemy + 2.0. **Existing code that passes Mypy checks using the plugin with + SQLAlchemy 1.4 installed will continue to pass all checks in SQLAlchemy 2.0 + without any changes required, provided the plugin is still used. The + upcoming API to be released with SQLAlchemy 2.0 is fully backwards + compatible with the SQLAlchemy 1.4 API and Mypy plugin behavior.** + + End-user code that passes all checks under SQLAlchemy 1.4 with the Mypy + plugin will be able to incrementally migrate to the new structures, once + that code is running exclusively on SQLAlchemy 2.0. The change consists of + altering how the :func:`_orm.declarative_base` construct is produced, and + then the replacement of inline Declarative :class:`_schema.Column` + structures with a fully cross-compatible ``mapped_column()`` construct. Both + constructs can coexist on any declaratively mapped class. 
+ + Code that is running exclusively on **not-released-yet** SQLAlchemy version + 2.0 and has fully migrated to the new declarative constructs will enjoy full + compliance with pep-484 as well as working correctly within IDEs and other + typing tools, without the need for plugins. Installation From 63c398b7cf500b1a60eb383d30ff60fc36e3a806 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 19 Feb 2022 14:11:19 -0500 Subject: [PATCH 123/632] updates for mariadb connector 1.0.10 Fixed regression in mariadbconnector dialect as of mariadb connector 1.0.10 where the DBAPI no longer pre-buffers cursor.lastrowid. The dialect now fetches this value proactively for situations where it applies. test_invalidate_on_results seems to pass for mariadbconnector now. the driver has likely changed how it buffers result sets. This is a major change for them to make in a point release so we might want to watch this in case they reverse course again. Fixes: #7738 Change-Id: I9610aae01d1ae42fa92ffbc7123a6948e40ec9dd (cherry picked from commit e120837b682a3a822c2dff136ad48b1ca9fb6ce2) --- doc/build/changelog/unreleased_14/7738.rst | 7 +++++++ lib/sqlalchemy/dialects/mysql/mariadbconnector.py | 9 +++++++++ lib/sqlalchemy/engine/default.py | 1 - setup.cfg | 3 ++- test/engine/test_reconnect.py | 1 - 5 files changed, 18 insertions(+), 3 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7738.rst diff --git a/doc/build/changelog/unreleased_14/7738.rst b/doc/build/changelog/unreleased_14/7738.rst new file mode 100644 index 00000000000..dbb028fde38 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7738.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: bug, mariadb, regression + :tickets: 7738 + + Fixed regression in mariadbconnector dialect as of mariadb connector 1.0.10 + where the DBAPI no longer pre-buffers cursor.lastrowid. The dialect now + fetches this value proactively for situations where it applies. 
diff --git a/lib/sqlalchemy/dialects/mysql/mariadbconnector.py b/lib/sqlalchemy/dialects/mysql/mariadbconnector.py index f3130488780..c8b2eada6dc 100644 --- a/lib/sqlalchemy/dialects/mysql/mariadbconnector.py +++ b/lib/sqlalchemy/dialects/mysql/mariadbconnector.py @@ -39,12 +39,21 @@ class MySQLExecutionContext_mariadbconnector(MySQLExecutionContext): + _lastrowid = None + def create_server_side_cursor(self): return self._dbapi_connection.cursor(buffered=False) def create_default_cursor(self): return self._dbapi_connection.cursor(buffered=True) + def post_exec(self): + if self.isinsert and self.compiled.postfetch_lastrowid: + self._lastrowid = self.cursor.lastrowid + + def get_lastrowid(self): + return self._lastrowid + class MySQLCompiler_mariadbconnector(MySQLCompiler): pass diff --git a/lib/sqlalchemy/engine/default.py b/lib/sqlalchemy/engine/default.py index d35e5f821ae..5a1443ecbc1 100644 --- a/lib/sqlalchemy/engine/default.py +++ b/lib/sqlalchemy/engine/default.py @@ -1565,7 +1565,6 @@ def inserted_primary_key_rows(self): return self._setup_ins_pk_from_empty() def _setup_ins_pk_from_lastrowid(self): - getter = self.compiled._inserted_primary_key_from_lastrowid_getter lastrowid = self.get_lastrowid() diff --git a/setup.cfg b/setup.cfg index f432561b189..49f829f7257 100644 --- a/setup.cfg +++ b/setup.cfg @@ -170,7 +170,8 @@ aiomysql = mysql+aiomysql://scott:tiger@127.0.0.1:3306/test?charset=utf8mb4 aiomysql_fallback = mysql+aiomysql://scott:tiger@127.0.0.1:3306/test?charset=utf8mb4&async_fallback=true asyncmy = mysql+asyncmy://scott:tiger@127.0.0.1:3306/test?charset=utf8mb4 asyncmy_fallback = mysql+asyncmy://scott:tiger@127.0.0.1:3306/test?charset=utf8mb4&async_fallback=true -mariadb = mariadb://scott:tiger@127.0.0.1:3306/test +mariadb = mariadb+mysqldb://scott:tiger@127.0.0.1:3306/test +mariadb_connector = mariadb+mariadbconnector://scott:tiger@127.0.0.1:3306/test mssql = mssql+pyodbc://scott:tiger^5HHH@mssql2017:1433/test?driver=ODBC+Driver+13+for+SQL+Server 
mssql_pymssql = mssql+pymssql://scott:tiger@ms_2008 docker_mssql = mssql+pymssql://scott:tiger^5HHH@127.0.0.1:1433/test diff --git a/test/engine/test_reconnect.py b/test/engine/test_reconnect.py index 9579d6c2dc9..bd597a96b79 100644 --- a/test/engine/test_reconnect.py +++ b/test/engine/test_reconnect.py @@ -1381,7 +1381,6 @@ def teardown_test(self): ) @testing.fails_if( [ - "+mariadbconnector", "+mysqlconnector", "+mysqldb", "+cymysql", From bbc40f1f32c6495f4e6dd7546206affb78d6cbe2 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 19 Feb 2022 13:51:22 -0500 Subject: [PATCH 124/632] remove never-used get_result_cursor_strategy() method This method I would assume got committed during the 1.4 engine refactor, where we moved from different kinds of ResultProxy implementations to different strategy classes instead. These strategies are set up by dialects by setting "self.cursor_fetch_strategy" in the execution context. The method here was likely a previous iteration of that which got merged but was never used. 
Change-Id: Iec292428f41c2c245bf7ae78beaa14786c28846c (cherry picked from commit ec4a4910aa9ecc516cf3b096cb053fd9be7f82cc) --- lib/sqlalchemy/dialects/mssql/base.py | 9 ---- lib/sqlalchemy/engine/interfaces.py | 67 --------------------------- 2 files changed, 76 deletions(-) diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index 5d8e50213ab..2006763b13b 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -1625,7 +1625,6 @@ class MSExecutionContext(default.DefaultExecutionContext): _select_lastrowid = False _lastrowid = None _rowcount = None - _result_strategy = None def _opt_encode(self, statement): @@ -1757,14 +1756,6 @@ def handle_dbapi_exception(self, e): except Exception: pass - def get_result_cursor_strategy(self, result): - if self._result_strategy: - return self._result_strategy - else: - return super(MSExecutionContext, self).get_result_cursor_strategy( - result - ) - def fire_sequence(self, seq, type_): return self._execute_scalar( ( diff --git a/lib/sqlalchemy/engine/interfaces.py b/lib/sqlalchemy/engine/interfaces.py index e86fa2b6e78..4f2524aef2d 100644 --- a/lib/sqlalchemy/engine/interfaces.py +++ b/lib/sqlalchemy/engine/interfaces.py @@ -1418,10 +1418,6 @@ def get_out_parameter_values(self, out_param_names): set. This replaces the practice of setting out parameters within the now-removed ``get_result_proxy()`` method. - .. seealso:: - - :meth:`.ExecutionContext.get_result_cursor_strategy` - """ raise NotImplementedError() @@ -1435,69 +1431,6 @@ def post_exec(self): raise NotImplementedError() - def get_result_cursor_strategy(self, result): - """Return a result cursor strategy for a given result object. 
- - This method is implemented by the :class:`.DefaultDialect` and is - only needed by implementing dialects in the case where some special - steps regarding the cursor must be taken, such as manufacturing - fake results from some other element of the cursor, or pre-buffering - the cursor's results. - - A simplified version of the default implementation is:: - - from sqlalchemy.engine.result import DefaultCursorFetchStrategy - - class MyExecutionContext(DefaultExecutionContext): - def get_result_cursor_strategy(self, result): - return DefaultCursorFetchStrategy.create(result) - - Above, the :class:`.DefaultCursorFetchStrategy` will be applied - to the result object. For results that are pre-buffered from a - cursor that might be closed, an implementation might be:: - - - from sqlalchemy.engine.result import ( - FullyBufferedCursorFetchStrategy - ) - - class MyExecutionContext(DefaultExecutionContext): - _pre_buffered_result = None - - def pre_exec(self): - if self.special_condition_prebuffer_cursor(): - self._pre_buffered_result = ( - self.cursor.description, - self.cursor.fetchall() - ) - - def get_result_cursor_strategy(self, result): - if self._pre_buffered_result: - description, cursor_buffer = self._pre_buffered_result - return ( - FullyBufferedCursorFetchStrategy. - create_from_buffer( - result, description, cursor_buffer - ) - ) - else: - return DefaultCursorFetchStrategy.create(result) - - This method replaces the previous not-quite-documented - ``get_result_proxy()`` method. - - .. versionadded:: 1.4 - result objects now interpret cursor results - based on a pluggable "strategy" object, which is delivered - by the :class:`.ExecutionContext` via the - :meth:`.ExecutionContext.get_result_cursor_strategy` method. - - .. 
seealso:: - - :meth:`.ExecutionContext.get_out_parameter_values` - - """ - raise NotImplementedError() - def handle_dbapi_exception(self, e): """Receive a DBAPI exception which occurred upon execute, result fetch, etc.""" From 9d0a17ad27e9eab14bd52ec54fbfb4803c74a535 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 20 Feb 2022 09:51:22 -0500 Subject: [PATCH 125/632] improve reflection of inline UNIQUE constraints Fixed issue where SQLite unique constraint reflection would not work for an inline UNIQUE constraint where the column name had an underscore in its name. Added support for reflecting SQLite inline unique constraints where the column names are formatted with SQLite "escape quotes" ``[]`` or `` ` ``, which are discarded by the database when producing the column name. Fixes: #7736 Change-Id: I635003478dc27193995f7d7a6448f9333a498706 (cherry picked from commit 834af17a469fd1893acf20225e8400c0c908053f) --- doc/build/changelog/unreleased_14/7736.rst | 16 +++++++ lib/sqlalchemy/dialects/sqlite/base.py | 3 +- test/dialect/test_sqlite.py | 50 ++++++++++++++++++++++ 3 files changed, 68 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/7736.rst diff --git a/doc/build/changelog/unreleased_14/7736.rst b/doc/build/changelog/unreleased_14/7736.rst new file mode 100644 index 00000000000..828dd540a4c --- /dev/null +++ b/doc/build/changelog/unreleased_14/7736.rst @@ -0,0 +1,16 @@ +.. change:: + :tags: bug, sqlite + :tickets: 7736 + + Fixed issue where SQLite unique constraint reflection would not work + for an inline UNIQUE constraint where the column name had an underscore + in its name. + +.. change:: + :tags: usecase, sqlite + :tickets: 7736 + + Added support for reflecting SQLite inline unique constraints where + the column names are formatted with SQLite "escape quotes" ``[]`` + or `` ` ``, which are discarded by the database when producing the + column name. 
diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index bcea17620f3..7ba9700d709 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -2414,7 +2414,8 @@ def get_unique_constraints( def parse_uqs(): UNIQUE_PATTERN = r'(?:CONSTRAINT "?(.+?)"? +)?UNIQUE *\((.+?)\)' INLINE_UNIQUE_PATTERN = ( - r'(?:(".+?")|([a-z0-9]+)) ' r"+[a-z0-9_ ]+? +UNIQUE" + r'(?:(".+?")|(?:[\[`])?([a-z0-9_]+)(?:[\]`])?) ' + r"+[a-z0-9_ ]+? +UNIQUE" ) for match in re.finditer(UNIQUE_PATTERN, table_data, re.I): diff --git a/test/dialect/test_sqlite.py b/test/dialect/test_sqlite.py index 2e0eccc96bb..6230c7f9459 100644 --- a/test/dialect/test_sqlite.py +++ b/test/dialect/test_sqlite.py @@ -42,6 +42,7 @@ from sqlalchemy.engine.url import make_url from sqlalchemy.schema import CreateTable from sqlalchemy.schema import FetchedValue +from sqlalchemy.sql.elements import quoted_name from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import AssertsCompiledSQL @@ -2401,6 +2402,55 @@ def test_check_constraint(self): ], ) + @testing.combinations( + ("plain_name", "plain_name"), + ("name with spaces", "name with spaces"), + ("plainname", "plainname"), + ("[Code]", "[Code]"), + (quoted_name("[Code]", quote=False), "Code"), + argnames="colname,expected", + ) + @testing.combinations( + "uq", "uq_inline", "pk", "ix", argnames="constraint_type" + ) + def test_constraint_cols( + self, colname, expected, constraint_type, connection, metadata + ): + if constraint_type == "uq_inline": + t = Table("t", metadata, Column(colname, Integer)) + connection.exec_driver_sql( + """ + CREATE TABLE t (%s INTEGER UNIQUE) + """ + % connection.dialect.identifier_preparer.quote(colname) + ) + else: + t = Table("t", metadata, Column(colname, Integer)) + if constraint_type == "uq": + constraint = UniqueConstraint(t.c[colname]) + elif constraint_type == "pk": + constraint = 
PrimaryKeyConstraint(t.c[colname]) + elif constraint_type == "ix": + constraint = Index("some_index", t.c[colname]) + else: + assert False + + t.append_constraint(constraint) + + t.create(connection) + + if constraint_type in ("uq", "uq_inline"): + const = inspect(connection).get_unique_constraints("t")[0] + eq_(const["column_names"], [expected]) + elif constraint_type == "pk": + const = inspect(connection).get_pk_constraint("t") + eq_(const["constrained_columns"], [expected]) + elif constraint_type == "ix": + const = inspect(connection).get_indexes("t")[0] + eq_(const["column_names"], [expected]) + else: + assert False + class SavepointTest(fixtures.TablesTest): From 2b72c095732939cc96424bc2b7186d5f1723a1db Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 23 Feb 2022 12:50:36 -0500 Subject: [PATCH 126/632] support add_cte() for TextualSelect Fixed issue where the :meth:`.HasCTE.add_cte` method as called upon a :class:`.TextualSelect` instance was not being accommodated by the SQL compiler. The fix additionally adds more "SELECT"-like compiler behavior to :class:`.TextualSelect` including that DML CTEs such as UPDATE and INSERT may be accommodated. Fixes: #7760 Change-Id: Id97062d882e9b2a81b8e31c2bfaa9cfc5f77d5c1 (cherry picked from commit bef67e58121704a9836e1e5ec2d361cd2086036c) --- doc/build/changelog/unreleased_14/7760.rst | 9 +++ lib/sqlalchemy/orm/context.py | 64 +++++++++++++++++++++- lib/sqlalchemy/sql/compiler.py | 18 +++++- test/sql/test_cte.py | 45 +++++++++++++++ 4 files changed, 133 insertions(+), 3 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7760.rst diff --git a/doc/build/changelog/unreleased_14/7760.rst b/doc/build/changelog/unreleased_14/7760.rst new file mode 100644 index 00000000000..2f0d403dd85 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7760.rst @@ -0,0 +1,9 @@ +.. 
change:: + :tags: bug, sql + :tickets: 7760 + + Fixed issue where the :meth:`.HasCTE.add_cte` method as called upon a + :class:`.TextualSelect` instance was not being accommodated by the SQL + compiler. The fix additionally adds more "SELECT"-like compiler behavior to + :class:`.TextualSelect` including that DML CTEs such as UPDATE and INSERT + may be accommodated. diff --git a/lib/sqlalchemy/orm/context.py b/lib/sqlalchemy/orm/context.py index a81bf557399..7a63543a650 100644 --- a/lib/sqlalchemy/orm/context.py +++ b/lib/sqlalchemy/orm/context.py @@ -22,6 +22,7 @@ from .. import inspect from .. import sql from .. import util +from ..sql import ClauseElement from ..sql import coercions from ..sql import expression from ..sql import roles @@ -494,8 +495,8 @@ def create_for_statement(cls, statement_container, compiler, **kw): entity.setup_compile_state(self) # we did the setup just to get primary columns. - self.statement = expression.TextualSelect( - self.statement, self.primary_columns, positional=False + self.statement = _AdHocColumnsStatement( + self.statement, self.primary_columns ) else: # allow TextualSelect with implicit columns as well @@ -522,6 +523,65 @@ def _get_current_adapter(self): return None +class _AdHocColumnsStatement(ClauseElement): + """internal object created to somewhat act like a SELECT when we + are selecting columns from a DML RETURNING. 
+ + + """ + + __visit_name__ = None + + def __init__(self, text, columns): + self.element = text + self.column_args = [ + coercions.expect(roles.ColumnsClauseRole, c) for c in columns + ] + + def _generate_cache_key(self): + raise NotImplementedError() + + def _gen_cache_key(self, anon_map, bindparams): + raise NotImplementedError() + + def _compiler_dispatch( + self, compiler, compound_index=None, asfrom=False, **kw + ): + """provide a fixed _compiler_dispatch method.""" + + toplevel = not compiler.stack + entry = ( + compiler._default_stack_entry if toplevel else compiler.stack[-1] + ) + + populate_result_map = ( + toplevel + # these two might not be needed + or ( + compound_index == 0 + and entry.get("need_result_map_for_compound", False) + ) + or entry.get("need_result_map_for_nested", False) + ) + + if populate_result_map: + compiler._ordered_columns = ( + compiler._textual_ordered_columns + ) = False + + # enable looser result column matching. this is shown to be + # needed by test_query.py::TextTest + compiler._loose_column_name_matching = True + + for c in self.column_args: + compiler.process( + c, + within_columns_clause=True, + add_to_result_map=compiler._add_to_result_map, + ) + return compiler.process(self.element, **kw) + + @sql.base.CompileState.plugin_for("orm", "select") class ORMSelectCompileState(ORMCompileState, SelectState): _joinpath = _joinpoint = _EMPTY_DICT diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index ed290552709..6be8ae281fe 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -1621,6 +1621,17 @@ def visit_textual_select( toplevel = not self.stack entry = self._default_stack_entry if toplevel else self.stack[-1] + new_entry = { + "correlate_froms": set(), + "asfrom_froms": set(), + "selectable": taf, + } + self.stack.append(new_entry) + + if taf._independent_ctes: + for cte in taf._independent_ctes: + cte._compiler_dispatch(self, **kw) + populate_result_map = ( toplevel or 
( @@ -1648,7 +1659,12 @@ def visit_textual_select( add_to_result_map=self._add_to_result_map, ) - return self.process(taf.element, **kw) + text = self.process(taf.element, **kw) + if self.ctes: + nesting_level = len(self.stack) if not toplevel else None + text = self._render_cte_clause(nesting_level=nesting_level) + text + + return text def visit_null(self, expr, **kw): return "NULL" diff --git a/test/sql/test_cte.py b/test/sql/test_cte.py index df9f065acc8..1afa5c8558a 100644 --- a/test/sql/test_cte.py +++ b/test/sql/test_cte.py @@ -1613,6 +1613,51 @@ def test_compound_select_uses_independent_cte(self): }, ) + def test_textual_select_uses_independent_cte_one(self): + """test #7760""" + products = table("products", column("id"), column("price")) + + upd_cte = ( + products.update().values(price=10).where(products.c.price > 50) + ).cte() + + stmt = ( + text( + "SELECT products.id, products.price " + "FROM products WHERE products.price < :price_2" + ) + .columns(products.c.id, products.c.price) + .bindparams(price_2=45) + .add_cte(upd_cte) + ) + + self.assert_compile( + stmt, + "WITH anon_1 AS (UPDATE products SET price=:param_1 " + "WHERE products.price > :price_1) " + "SELECT products.id, products.price " + "FROM products WHERE products.price < :price_2", + checkparams={"param_1": 10, "price_1": 50, "price_2": 45}, + ) + + def test_textual_select_uses_independent_cte_two(self): + + foo = table("foo", column("id")) + bar = table("bar", column("id"), column("attr"), column("foo_id")) + s1 = select(foo.c.id) + s2 = text( + "SELECT bar.id, bar.attr FROM bar " + "WHERE bar.foo_id IN (SELECT id FROM baz)" + ).columns(bar.c.id, bar.c.attr) + s3 = s2.add_cte(s1.cte(name="baz")) + + self.assert_compile( + s3, + "WITH baz AS (SELECT foo.id AS id FROM foo) " + "SELECT bar.id, bar.attr FROM bar WHERE bar.foo_id IN " + "(SELECT id FROM baz)", + ) + def test_insert_uses_independent_cte(self): products = table("products", column("id"), column("price")) From 
32a62cc2d0ca4c13b7eb02307a592f6781962b65 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 23 Feb 2022 13:43:03 -0500 Subject: [PATCH 127/632] support cx_Oracle DPI disconnect codes Added support to parse "DPI" error codes from cx_Oracle exception objects such as ``DPI-1080`` and ``DPI-1010``, both of which now indicate a disconnect scenario as of cx_Oracle 8.3. Fixes: #7748 Change-Id: I4a10d606d512c0d7f9b4653c47ea5734afffb8a5 (cherry picked from commit 8f9e971f10dee0614054671e0c284f0acace2d04) --- doc/build/changelog/unreleased_14/7748.rst | 7 +++ lib/sqlalchemy/dialects/oracle/cx_oracle.py | 21 +++++++-- test/dialect/oracle/test_dialect.py | 52 ++++++++++++++++++++- 3 files changed, 74 insertions(+), 6 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7748.rst diff --git a/doc/build/changelog/unreleased_14/7748.rst b/doc/build/changelog/unreleased_14/7748.rst new file mode 100644 index 00000000000..d9d6bf23613 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7748.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: bug, oracle, regression + :tickets: 7748 + + Added support to parse "DPI" error codes from cx_Oracle exception objects + such as ``DPI-1080`` and ``DPI-1010``, both of which now indicate a + disconnect scenario as of cx_Oracle 8.3. 
diff --git a/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/lib/sqlalchemy/dialects/oracle/cx_oracle.py index 104b88bc0de..4c89ed7355d 100644 --- a/lib/sqlalchemy/dialects/oracle/cx_oracle.py +++ b/lib/sqlalchemy/dialects/oracle/cx_oracle.py @@ -1318,7 +1318,14 @@ def is_disconnect(self, e, connection, cursor): ) and "not connected" in str(e): return True - if hasattr(error, "code"): + if hasattr(error, "code") and error.code in { + 28, + 3114, + 3113, + 3135, + 1033, + 2396, + }: # ORA-00028: your session has been killed # ORA-03114: not connected to ORACLE # ORA-03113: end-of-file on communication channel @@ -1326,9 +1333,15 @@ def is_disconnect(self, e, connection, cursor): # ORA-01033: ORACLE initialization or shutdown in progress # ORA-02396: exceeded maximum idle time, please connect again # TODO: Others ? - return error.code in (28, 3114, 3113, 3135, 1033, 2396) - else: - return False + return True + + if re.match(r"^(?:DPI-1010|DPI-1080)", str(e)): + # DPI-1010: not connected + # DPI-1080: connection was closed by ORA-3113 + # TODO: others? + return True + + return False def create_xid(self): """create a two-phase transaction ID. 
diff --git a/test/dialect/oracle/test_dialect.py b/test/dialect/oracle/test_dialect.py index acabfc8bb80..d65a6d2b53a 100644 --- a/test/dialect/oracle/test_dialect.py +++ b/test/dialect/oracle/test_dialect.py @@ -58,7 +58,7 @@ def test_minimum_version(self): exc.InvalidRequestError, "cx_Oracle version 5.2 and above are supported", cx_oracle.OracleDialect_cx_oracle, - dbapi=Mock(), + dbapi=mock.Mock(), ) with mock.patch( @@ -66,13 +66,61 @@ def test_minimum_version(self): "_parse_cx_oracle_ver", lambda self, vers: (5, 3, 1), ): - cx_oracle.OracleDialect_cx_oracle(dbapi=Mock()) + cx_oracle.OracleDialect_cx_oracle(dbapi=mock.Mock()) class DialectWBackendTest(fixtures.TestBase): __backend__ = True __only_on__ = "oracle" + @testing.combinations( + ( + "db is not connected", + None, + True, + ), + ( + "ORA-1234 fake error", + 1234, + False, + ), + ( + "ORA-03114: not connected to ORACLE", + 3114, + True, + ), + ( + "DPI-1010: not connected", + None, + True, + ), + ( + "DPI-1010: make sure we read the code", + None, + True, + ), + ( + "DPI-1080: connection was closed by ORA-3113", + None, + True, + ), + ( + "DPI-1234: some other DPI error", + None, + False, + ), + ) + @testing.only_on("oracle+cx_oracle") + def test_is_disconnect(self, message, code, expected): + + dialect = testing.db.dialect + + exception_obj = dialect.dbapi.InterfaceError() + exception_obj.args = (Exception(message),) + exception_obj.args[0].code = code + + eq_(dialect.is_disconnect(exception_obj, None, None), expected) + def test_hypothetical_not_implemented_isolation_level(self): engine = engines.testing_engine() From a3042e289dce56e74825a712ff1c4ed4311cc2ca Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 25 Feb 2022 12:40:21 -0500 Subject: [PATCH 128/632] block pypy for now the most recent pypy 7.3.8 series (3.7 and 3.9 included, likely 3.8 as well) have installed a 9 year old version of SQLite, and additionally seem to have some other behavioral changes like formatting of exception messages 
that is breaking some tests. as we are waiting on a response at [1] remove pypy testing for now. [1] https://foss.heptapod.net/pypy/pypy/-/issues/3690 Change-Id: I66650635111e71241b5c45a778954544c8d2490e (cherry picked from commit 51e6a62ab371897d646a5986b9139838402c57df) --- .github/workflows/run-test.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/run-test.yaml b/.github/workflows/run-test.yaml index 5675ac6ec5d..1dfbf65f33f 100644 --- a/.github/workflows/run-test.yaml +++ b/.github/workflows/run-test.yaml @@ -34,7 +34,9 @@ jobs: - "3.8" - "3.9" - "3.10" - - "pypy-3.7" + # waiting on https://foss.heptapod.net/pypy/pypy/-/issues/3690 + # which also seems to be in 3.9 + # - "pypy-3.9" build-type: - "cext" - "nocext" From e18d9ebef799c4cf4daebe314a0cbd152680fd2a Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 25 Feb 2022 13:26:02 -0500 Subject: [PATCH 129/632] repair GH actions syntax the commit in 51e6a62ab371897d646a5 included a comment that appears to not be accepted Change-Id: I1e56293e0e75c0440073bff7190b4961cfebc353 (cherry picked from commit 96e197f3d0348fd7d79fdd126f989490b51fd9ef) --- .github/workflows/run-test.yaml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/run-test.yaml b/.github/workflows/run-test.yaml index 1dfbf65f33f..67c41506b0d 100644 --- a/.github/workflows/run-test.yaml +++ b/.github/workflows/run-test.yaml @@ -34,9 +34,6 @@ jobs: - "3.8" - "3.9" - "3.10" - # waiting on https://foss.heptapod.net/pypy/pypy/-/issues/3690 - # which also seems to be in 3.9 - # - "pypy-3.9" build-type: - "cext" - "nocext" From d96fc604f9172daef28ef5f5e5c1074de966d527 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 27 Feb 2022 14:27:54 -0500 Subject: [PATCH 130/632] remove incorrect deprecation message this message likely referred to ResultProxy, no idea how it referred to CursorResult, that's a very confusing issue. 
ResultProxy -> CursorResult is mostly a name change as far as backwards compat is concerned so there's not much "deprecated" here. Change-Id: Ic06bdde65a120101b2f9db22483bf3be6ff3556c --- doc/build/core/connections.rst | 3 --- 1 file changed, 3 deletions(-) diff --git a/doc/build/core/connections.rst b/doc/build/core/connections.rst index c0efba0f5c0..97191f5aaa6 100644 --- a/doc/build/core/connections.rst +++ b/doc/build/core/connections.rst @@ -74,9 +74,6 @@ pooling mechanism issues a ``rollback()`` call on the DBAPI connection so that any transactional state or locks are removed, and the connection is ready for its next use. -.. deprecated:: 2.0 The :class:`_engine.CursorResult` object is replaced in SQLAlchemy - 2.0 with a newly refined object known as :class:`_future.Result`. - Our example above illustrated the execution of a textual SQL string, which should be invoked by using the :func:`_expression.text` construct to indicate that we'd like to use textual SQL. The :meth:`_engine.Connection.execute` method can of From 61072ebef08468460484e7c065db0f58626a1448 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 2 Mar 2022 21:43:53 -0500 Subject: [PATCH 131/632] improve error raise for dialect/pool events w/ async engine Fixed issues where a descriptive error message was not raised for some classes of event listening with an async engine, which should instead be a sync engine instance. 
Change-Id: I00b9f4fe9373ef5fd5464fac10651cc4024f648e (cherry picked from commit e893ce1196a1fb02ad53576fd75ffd40e5d9dd89) --- .../unreleased_14/async_no_event.rst | 6 +++ lib/sqlalchemy/engine/events.py | 8 +++- lib/sqlalchemy/ext/asyncio/events.py | 12 ++++- lib/sqlalchemy/pool/events.py | 8 +++- test/base/test_events.py | 45 +++++++++++++++++++ test/ext/asyncio/test_engine_py3k.py | 22 +++++++++ 6 files changed, 97 insertions(+), 4 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/async_no_event.rst diff --git a/doc/build/changelog/unreleased_14/async_no_event.rst b/doc/build/changelog/unreleased_14/async_no_event.rst new file mode 100644 index 00000000000..8deda89453d --- /dev/null +++ b/doc/build/changelog/unreleased_14/async_no_event.rst @@ -0,0 +1,6 @@ +.. change:: + :tags: bug, asyncio + + Fixed issues where a descriptive error message was not raised for some + classes of event listening with an async engine, which should instead be a + sync engine instance. \ No newline at end of file diff --git a/lib/sqlalchemy/engine/events.py b/lib/sqlalchemy/engine/events.py index ca70f037a63..286c4d48773 100644 --- a/lib/sqlalchemy/engine/events.py +++ b/lib/sqlalchemy/engine/events.py @@ -716,8 +716,14 @@ def _accept_with(cls, target): return target elif isinstance(target, Engine): return target.dialect - else: + elif isinstance(target, Dialect): return target + elif hasattr(target, "dispatch") and hasattr( + target.dispatch._events, "_no_async_engine_events" + ): + target.dispatch._events._no_async_engine_events() + else: + return None def do_connect(self, dialect, conn_rec, cargs, cparams): """Receive connection arguments before a connection is made. 
diff --git a/lib/sqlalchemy/ext/asyncio/events.py b/lib/sqlalchemy/ext/asyncio/events.py index a059b93e6b9..c5d5e0126e9 100644 --- a/lib/sqlalchemy/ext/asyncio/events.py +++ b/lib/sqlalchemy/ext/asyncio/events.py @@ -16,21 +16,29 @@ class AsyncConnectionEvents(engine_event.ConnectionEvents): _dispatch_target = AsyncConnectable @classmethod - def _listen(cls, event_key, retval=False): + def _no_async_engine_events(cls): raise NotImplementedError( "asynchronous events are not implemented at this time. Apply " "synchronous listeners to the AsyncEngine.sync_engine or " "AsyncConnection.sync_connection attributes." ) + @classmethod + def _listen(cls, event_key, retval=False): + cls._no_async_engine_events() + class AsyncSessionEvents(orm_event.SessionEvents): _target_class_doc = "SomeSession" _dispatch_target = AsyncSession @classmethod - def _listen(cls, event_key, retval=False): + def _no_async_engine_events(cls): raise NotImplementedError( "asynchronous events are not implemented at this time. Apply " "synchronous listeners to the AsyncSession.sync_session." 
) + + @classmethod + def _listen(cls, event_key, retval=False): + cls._no_async_engine_events() diff --git a/lib/sqlalchemy/pool/events.py b/lib/sqlalchemy/pool/events.py index 8dd99bb84aa..2829a58ae30 100644 --- a/lib/sqlalchemy/pool/events.py +++ b/lib/sqlalchemy/pool/events.py @@ -51,8 +51,14 @@ def _accept_with(cls, target): return target elif isinstance(target, Engine): return target.pool - else: + elif isinstance(target, Pool): return target + elif hasattr(target, "dispatch") and hasattr( + target.dispatch._events, "_no_async_engine_events" + ): + target.dispatch._events._no_async_engine_events() + else: + return None @classmethod def _listen(cls, event_key, **kw): diff --git a/test/base/test_events.py b/test/base/test_events.py index 68db5207ca0..4409d6b2947 100644 --- a/test/base/test_events.py +++ b/test/base/test_events.py @@ -7,6 +7,7 @@ from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import eq_ from sqlalchemy.testing import expect_deprecated +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import is_not @@ -197,6 +198,50 @@ def test_exec_once(self): eq_(m1.mock_calls, [call(5, 6), call(9, 10)]) + def test_real_name_wrong_dispatch(self): + m1 = Mock() + + class E1(event.Events): + @classmethod + def _accept_with(cls, target): + if isinstance(target, T1): + return target + else: + m1.yup() + return None + + def event_one(self, x, y): + pass + + def event_two(self, x): + pass + + def event_three(self, x): + pass + + class T1(object): + dispatch = event.dispatcher(E1) + + class T2(object): + pass + + class E2(event.Events): + + _dispatch_target = T2 + + def event_four(self, x): + pass + + with expect_raises_message( + exc.InvalidRequestError, "No such event 'event_three'" + ): + + @event.listens_for(E2, "event_three") + def go(*arg): + pass + + eq_(m1.mock_calls, [call.yup()]) + def test_exec_once_exception(self): m1 = Mock() 
m1.side_effect = ValueError diff --git a/test/ext/asyncio/test_engine_py3k.py b/test/ext/asyncio/test_engine_py3k.py index 84358f4ee4f..9340f0828dd 100644 --- a/test/ext/asyncio/test_engine_py3k.py +++ b/test/ext/asyncio/test_engine_py3k.py @@ -653,6 +653,28 @@ async def test_no_async_listeners(self, async_engine): ): event.listen(conn, "before_cursor_execute", mock.Mock()) + @async_test + async def test_no_async_listeners_dialect_event(self, async_engine): + with testing.expect_raises_message( + NotImplementedError, + "asynchronous events are not implemented " + "at this time. Apply synchronous listeners to the " + "AsyncEngine.sync_engine or " + "AsyncConnection.sync_connection attributes.", + ): + event.listen(async_engine, "do_execute", mock.Mock()) + + @async_test + async def test_no_async_listeners_pool_event(self, async_engine): + with testing.expect_raises_message( + NotImplementedError, + "asynchronous events are not implemented " + "at this time. Apply synchronous listeners to the " + "AsyncEngine.sync_engine or " + "AsyncConnection.sync_connection attributes.", + ): + event.listen(async_engine, "checkout", mock.Mock()) + @async_test async def test_sync_before_cursor_execute_engine(self, async_engine): canary = mock.Mock() From 2efc21976ad1a384150660cdd98062090c8d1a15 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 3 Mar 2022 10:33:14 -0500 Subject: [PATCH 132/632] quick doc adjustments tweet thread: https://twitter.com/zzzeek/status/1499397255089909762 Change-Id: I5556ef707c2285e7b64c91ae5c58af52efcf4770 (cherry picked from commit f68a51c549754637bd59213101025f1ee63e4865) --- doc/build/index.rst | 52 ++++++++++++++++++++++++++++++++------------- doc/build/intro.rst | 6 ++++++ 2 files changed, 43 insertions(+), 15 deletions(-) diff --git a/doc/build/index.rst b/doc/build/index.rst index 35005872f43..d39f1217a75 100644 --- a/doc/build/index.rst +++ b/doc/build/index.rst @@ -16,16 +16,11 @@ SQLAlchemy Documentation .. 
container:: - A high level view and getting set up. + New to SQLAlchemy? Start here: - :doc:`Overview ` | - :ref:`Installation Guide ` | - :doc:`Frequently Asked Questions ` | - :doc:`Migration from 1.3 ` | - :doc:`Glossary ` | - :doc:`Error Messages ` | - :doc:`Changelog catalog ` + * **For Python Beginners:** :ref:`Installation Guide ` - basic guidance on installing with pip and similar + * **For Python Veterans:** :doc:`SQLAlchemy Overview ` - brief architectural overview .. container:: left_right_container @@ -37,20 +32,17 @@ SQLAlchemy Documentation .. container:: - **SQLAlchemy 1.4 / 2.0 Transitional** + **SQLAlchemy 1.4 / 2.0** SQLAlchemy 2.0 is functionally available as part of SQLAlchemy 1.4, and integrates Core and ORM working styles more closely than ever. The new tutorial introduces - both concepts in parallel. New users and those starting new projects should start here! - - * :doc:`/tutorial/index` - SQLAlchemy 2.0's main tutorial - - * :doc:`Migrating to SQLAlchemy 2.0 ` - Complete background on migrating from 1.3 or 1.4 to 2.0 + both concepts in parallel. + * **For all users:** :doc:`/tutorial/index` - The new SQLAlchemy 1.4/2.0 Tutorial .. container:: - **SQLAlchemy 1.x Releases** + **Legacy SQLAlchemy 1.x Tutorials** The 1.x Object Relational Tutorial and Core Tutorial are the legacy tutorials that should be consulted for existing SQLAlchemy codebases. @@ -60,6 +52,21 @@ SQLAlchemy Documentation * :doc:`core/tutorial` +.. container:: left_right_container + + .. container:: leftmost + + .. rst-class:: h2 + + Migration Notes + + .. container:: + + * :doc:`Migration from 1.3 ` - Migration notes for SQLAlchemy Version 1.4 + * :doc:`SQLAlchemy 2.0 Preview ` - Background on preparing a SQLAlchemy 1.4 application for SQLAlchemy 2.0 + * :doc:`Changelog catalog ` - Detailed changelogs for all SQLAlchemy Versions + + .. container:: left_right_container .. container:: leftmost @@ -150,3 +157,18 @@ SQLAlchemy Documentation :doc:`More Dialects ... ` +.. 
container:: left_right_container + + .. container:: leftmost + + .. rst-class:: h2 + + Supplementary + + .. container:: + + * :doc:`Frequently Asked Questions ` - A collection of common problems and solutions + * :doc:`Glossary ` - Terms used in SQLAlchemy's documentation + * :doc:`Error Message Guide ` - Explainations of many SQLAlchemy Errors + * :doc:`Complete table of of contents ` + * :doc:`Index ` diff --git a/doc/build/intro.rst b/doc/build/intro.rst index 01e33df0346..4f1b64d15b4 100644 --- a/doc/build/intro.rst +++ b/doc/build/intro.rst @@ -206,6 +206,12 @@ Python prompt like this: >>> sqlalchemy.__version__ # doctest: +SKIP 1.4.0 +Next Steps +---------- + +With SQLAlchemy installed, new and old users alike can +:ref:`Proceed to the SQLAlchemy Tutorial `. + .. _migration: 1.3 to 1.4 Migration From f21df4d12cef88609ee898b9cd2723ed59fa0866 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 4 Mar 2022 11:29:54 -0500 Subject: [PATCH 133/632] Version 1.4.32 placeholder Mike cherry-picking this manually as it appears I forgot to run post_update when I released 1.4.31 (cherry picked from commit 758f2b96d336c7ba5382e89bfd421f986b5ef846) --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 4c68a387e86..79102dd3e29 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.32 + :include_notes_from: unreleased_14 + .. 
changelog:: :version: 1.4.31 :released: January 20, 2022 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index 12056602e9c..9fb36951323 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.31" +__version__ = "1.4.32" def __go(lcls): From 3cc9448ce311e4a18e746bddd55a2bf912e317c4 Mon Sep 17 00:00:00 2001 From: petit87 Date: Sat, 26 Feb 2022 16:46:32 -0500 Subject: [PATCH 134/632] Fix repr for MySQL SET, generic Enum Fixed issues in :class:`_mysql.SET` datatype as well as :class:`.Enum` where the ``__repr__()`` method would not render all optional parameters in the string output, impacting the use of these types in Alembic autogenerate. Pull request for MySQL courtesy Yuki Nishimine. Fixes: #7720 Fixes: #7789 Closes: #7772 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/7772 Pull-request-sha: d58845479f497f6b2e12d7df2e9eb2d6ac22109b Co-authored-by: Mike Bayer Change-Id: Idcec23eab4258511d9f32f4e3d78e511ea6021f1 (cherry picked from commit a926dea6b78c91b627f0f0b86cdc6a9279872e99) --- .../changelog/unreleased_14/7720_7789.rst | 9 +++++++++ lib/sqlalchemy/dialects/mysql/enumerated.py | 9 +++++++++ lib/sqlalchemy/sql/sqltypes.py | 5 ++++- test/dialect/mysql/test_types.py | 18 ++++++++++++++++++ test/sql/test_types.py | 7 +++++++ 5 files changed, 47 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/7720_7789.rst diff --git a/doc/build/changelog/unreleased_14/7720_7789.rst b/doc/build/changelog/unreleased_14/7720_7789.rst new file mode 100644 index 00000000000..5c1026ef5f6 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7720_7789.rst @@ -0,0 +1,9 @@ +.. 
change:: + :tags: bug, sql, mysql + :tickets: 7720, 7789 + + Fixed issues in :class:`_mysql.SET` datatype as well as :class:`.Enum` + where the ``__repr__()`` method would not render all optional parameters in + the string output, impacting the use of these types in Alembic + autogenerate. Pull request for MySQL courtesy Yuki Nishimine. + diff --git a/lib/sqlalchemy/dialects/mysql/enumerated.py b/lib/sqlalchemy/dialects/mysql/enumerated.py index 9857a820e66..6c9ef28ec16 100644 --- a/lib/sqlalchemy/dialects/mysql/enumerated.py +++ b/lib/sqlalchemy/dialects/mysql/enumerated.py @@ -252,3 +252,12 @@ def process(value): def adapt(self, impltype, **kw): kw["retrieve_as_bitwise"] = self.retrieve_as_bitwise return util.constructor_copy(self, impltype, *self.values, **kw) + + def __repr__(self): + return util.generic_repr( + self, + to_inspect=[SET, _StringType], + additional_kw=[ + ("retrieve_as_bitwise", False), + ], + ) diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index 69cb858e509..726313fcf40 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ -1672,7 +1672,10 @@ def _object_value_for_elem(self, elem): def __repr__(self): return util.generic_repr( self, - additional_kw=[("native_enum", True)], + additional_kw=[ + ("native_enum", True), + ("create_constraint", False), + ], to_inspect=[Enum, SchemaType], ) diff --git a/test/dialect/mysql/test_types.py b/test/dialect/mysql/test_types.py index 7bdf6f8ceb7..3afe6c38538 100644 --- a/test/dialect/mysql/test_types.py +++ b/test/dialect/mysql/test_types.py @@ -1312,6 +1312,24 @@ def test_broken_enum_returns_blanks(self, metadata, connection): [("", ""), ("", ""), ("two", "two"), (None, None)], ) + @testing.combinations( + ( + [""], + {"retrieve_as_bitwise": True}, + "SET('', retrieve_as_bitwise=True)", + ), + (["a"], {}, "SET('a')"), + (["a", "b", "c"], {}, "SET('a', 'b', 'c')"), + ( + ["a", "b", "c"], + {"collation": "utf8_bin"}, + "SET('a', 'b', 'c', 
collation='utf8_bin')", + ), + argnames="value,kw,expected", + ) + def test_set_repr(self, value, kw, expected): + eq_(repr(mysql.SET(*value, **kw)), expected) + def colspec(c): return testing.db.dialect.ddl_compiler( diff --git a/test/sql/test_types.py b/test/sql/test_types.py index 935c2354dd0..8530b904331 100644 --- a/test/sql/test_types.py +++ b/test/sql/test_types.py @@ -2507,6 +2507,13 @@ def test_repr(self): "inherit_schema=True, native_enum=False)", ) + def test_repr_two(self): + e = Enum("x", "y", name="somename", create_constraint=True) + eq_( + repr(e), + "Enum('x', 'y', name='somename', create_constraint=True)", + ) + def test_length_native(self): e = Enum("x", "y", "long", length=42) From 762b8a73a3983f0d4c3bb1bdb5449e7c7bcedc38 Mon Sep 17 00:00:00 2001 From: Gord Thompson Date: Fri, 4 Mar 2022 11:13:14 -0700 Subject: [PATCH 135/632] Add LongAsMax note to mssql+pyodbc dialect docs Change-Id: I4491b188bae49ac615f8691dd9b7a8a341428ce7 (cherry picked from commit 8316e21e3821f678cf4c0ecbc9df5e360ddb5668) --- lib/sqlalchemy/dialects/mssql/pyodbc.py | 27 +++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/lib/sqlalchemy/dialects/mssql/pyodbc.py b/lib/sqlalchemy/dialects/mssql/pyodbc.py index 4c164a73f20..91e8fd6b5a0 100644 --- a/lib/sqlalchemy/dialects/mssql/pyodbc.py +++ b/lib/sqlalchemy/dialects/mssql/pyodbc.py @@ -179,6 +179,33 @@ def provide_token(dialect, conn_rec, cargs, cparams): isolation_level="AUTOCOMMIT" ) +Avoiding sending large string parameters as TEXT/NTEXT +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +By default, for historical reasons, Microsoft's ODBC drivers for SQL Server +send long string parameters (greater than 4000 SBCS characters or 2000 Unicode +characters) as TEXT/NTEXT values. TEXT and NTEXT have been deprecated for many +years and are starting to cause compatibility issues with newer versions of +SQL_Server/Azure. For example, see `this +issue `_. 
+ +Starting with ODBC Driver 18 for SQL Server we can override the legacy +behavior and pass long strings as varchar(max)/nvarchar(max) using the +``LongAsMax=Yes`` connection string parameter:: + + connection_url = sa.engine.URL.create( + "mssql+pyodbc", + username="scott", + password="tiger", + host="mssqlserver.example.com", + database="mydb", + query={ + "driver": "ODBC Driver 18 for SQL Server", + "LongAsMax": "Yes", + }, + ) + + Pyodbc Pooling / connection close behavior ------------------------------------------ From ea8f7e4466e683ad34a08562eed5311b7d242c9b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 4 Mar 2022 16:28:31 -0500 Subject: [PATCH 136/632] correct for pytest discovery Made corrections to the default pytest configuration so that test discovery runs correctly; previously, a configuration error had the effect of discovery locating the wrong files if a super-directory of the current directory were named "test". Fixes: #7045 Change-Id: I2e1f63a35f80ae3f53008f327d83c8342fa7f2f6 (cherry picked from commit d2815b4ac39b42a38dff4cc21ec100c72b9f3cae) --- setup.cfg | 3 ++- test/conftest.py | 6 +++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index 49f829f7257..807ee917059 100644 --- a/setup.cfg +++ b/setup.cfg @@ -93,7 +93,8 @@ where = lib [tool:pytest] addopts = --tb native -v -r sfxX --maxfail=250 -p no:warnings -p no:logging -python_files = test/*test_*.py +norecursedirs = examples build doc lib +python_files = test_*.py [upload] sign = 1 diff --git a/test/conftest.py b/test/conftest.py index c1c6c8c21be..515fff340e0 100755 --- a/test/conftest.py +++ b/test/conftest.py @@ -32,7 +32,11 @@ # We check no_user_site to honor the use of this flag. 
sys.path.insert( 0, - os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "lib"), + os.path.abspath( + os.path.join( + os.path.dirname(os.path.abspath(__file__)), "..", "lib" + ) + ), ) # use bootstrapping so that test plugins are loaded From d8c293145f2aa6d9bf90fd717ec45d947171f15c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 4 Mar 2022 17:30:21 -0500 Subject: [PATCH 137/632] add length to enum repr params This amends the fix for #7789. Fixes: #7598 Change-Id: I067a081d743f1efaf8288601bec0400712012265 (cherry picked from commit a26a522648af14ffb9388d8d306bd98523bef1c9) --- doc/build/changelog/unreleased_14/7720_7789.rst | 2 +- lib/sqlalchemy/sql/sqltypes.py | 5 +++-- test/sql/test_types.py | 7 +++++++ 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/doc/build/changelog/unreleased_14/7720_7789.rst b/doc/build/changelog/unreleased_14/7720_7789.rst index 5c1026ef5f6..5a521fe0103 100644 --- a/doc/build/changelog/unreleased_14/7720_7789.rst +++ b/doc/build/changelog/unreleased_14/7720_7789.rst @@ -1,6 +1,6 @@ .. 
change:: :tags: bug, sql, mysql - :tickets: 7720, 7789 + :tickets: 7720, 7789, 7598 Fixed issues in :class:`_mysql.SET` datatype as well as :class:`.Enum` where the ``__repr__()`` method would not render all optional parameters in diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index 726313fcf40..cc3dbffc5de 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ -1520,9 +1520,9 @@ def _enum_init(self, enums, kw): _expect_unicode = convert_unicode if self.enums: - length = max(len(x) for x in self.enums) + self._default_length = length = max(len(x) for x in self.enums) else: - length = 0 + self._default_length = length = 0 if not self.native_enum and length_arg is not NO_ARG: if length_arg < length: raise ValueError( @@ -1675,6 +1675,7 @@ def __repr__(self): additional_kw=[ ("native_enum", True), ("create_constraint", False), + ("length", self._default_length), ], to_inspect=[Enum, SchemaType], ) diff --git a/test/sql/test_types.py b/test/sql/test_types.py index 8530b904331..9f8b8a662c0 100644 --- a/test/sql/test_types.py +++ b/test/sql/test_types.py @@ -2514,6 +2514,13 @@ def test_repr_two(self): "Enum('x', 'y', name='somename', create_constraint=True)", ) + def test_repr_three(self): + e = Enum("x", "y", native_enum=False, length=255) + eq_( + repr(e), + "Enum('x', 'y', native_enum=False, length=255)", + ) + def test_length_native(self): e = Enum("x", "y", "long", length=42) From efe43590523bc97d0df17a2524078fed813ae8c1 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 4 Mar 2022 17:17:53 -0500 Subject: [PATCH 138/632] fix type string formatting calls Fixed type-related error messages that would fail for values that were tuples, due to string formatting syntax, including compile of unsupported literal values and invalid boolean values. 
Fixes: #7721 Change-Id: I6775721486ef2db2d0738b9aa08b9f2570f55659 (cherry picked from commit a261a78894c4f835b5da7fcbfb3d466a687bc11b) --- doc/build/changelog/unreleased_14/7721.rst | 7 +++++++ lib/sqlalchemy/sql/sqltypes.py | 8 ++++---- test/sql/test_types.py | 18 ++++++++++++++++++ 3 files changed, 29 insertions(+), 4 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7721.rst diff --git a/doc/build/changelog/unreleased_14/7721.rst b/doc/build/changelog/unreleased_14/7721.rst new file mode 100644 index 00000000000..d719e223342 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7721.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: bug, sql + :tickets: 7721 + + Fixed type-related error messages that would fail for values that were + tuples, due to string formatting syntax, including compile of unsupported + literal values and invalid boolean values. diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index 726313fcf40..72b7a2899a1 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ -1973,10 +1973,10 @@ def python_type(self): def _strict_as_bool(self, value): if value not in self._strict_bools: if not isinstance(value, int): - raise TypeError("Not a boolean value: %r" % value) + raise TypeError("Not a boolean value: %r" % (value,)) else: raise ValueError( - "Value %r is not None, True, or False" % value + "Value %r is not None, True, or False" % (value,) ) return value @@ -3220,7 +3220,7 @@ class NullType(TypeEngine): def literal_processor(self, dialect): def process(value): raise exc.CompileError( - "Don't know how to render literal SQL value: %r" % value + "Don't know how to render literal SQL value: %r" % (value,) ) return process @@ -3315,7 +3315,7 @@ def _resolve_value_to_type(value): insp.__class__ in inspection._registrars ): raise exc.ArgumentError( - "Object %r is not legal as a SQL literal value" % value + "Object %r is not legal as a SQL literal value" % (value,) ) return NULLTYPE else: 
diff --git a/test/sql/test_types.py b/test/sql/test_types.py index 8530b904331..d164de4ec6f 100644 --- a/test/sql/test_types.py +++ b/test/sql/test_types.py @@ -82,6 +82,7 @@ from sqlalchemy.testing import eq_ from sqlalchemy.testing import expect_deprecated_20 from sqlalchemy.testing import expect_raises +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import expect_warnings from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ @@ -3457,6 +3458,23 @@ def test_detect_coercion_not_fooled_by_mock(self): class CompileTest(fixtures.TestBase, AssertsCompiledSQL): __dialect__ = "default" + def test_compile_err_formatting(self): + with expect_raises_message( + exc.CompileError, + r"Don't know how to render literal SQL value: \(1, 2, 3\)", + ): + func.foo((1, 2, 3)).compile(compile_kwargs={"literal_binds": True}) + + def test_strict_bool_err_formatting(self): + typ = Boolean() + + dialect = default.DefaultDialect() + with expect_raises_message( + TypeError, + r"Not a boolean value: \(5,\)", + ): + typ.bind_processor(dialect)((5,)) + @testing.requires.unbounded_varchar def test_string_plain(self): self.assert_compile(String(), "VARCHAR") From bbb6b6da20b50a92f6244c7dcf8701cfbb20516f Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Fri, 25 Feb 2022 22:08:09 +0100 Subject: [PATCH 139/632] try fixing github actions again Change-Id: Iaf801a028510f276cf94b4999f129de8f4eb590c (cherry picked from commit 9e7c068d669b209713da62da5748579f92d98129) --- .github/workflows/run-test.yaml | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/.github/workflows/run-test.yaml b/.github/workflows/run-test.yaml index 67c41506b0d..81b6799e1b2 100644 --- a/.github/workflows/run-test.yaml +++ b/.github/workflows/run-test.yaml @@ -34,6 +34,9 @@ jobs: - "3.8" - "3.9" - "3.10" + # waiting on https://foss.heptapod.net/pypy/pypy/-/issues/3690 + # which also seems to be in 3.9 + # - "pypy-3.9" build-type: - "cext" - 
"nocext" @@ -43,8 +46,8 @@ jobs: include: # autocommit tests fail on the ci for some reason - - python-version: "pypy-3.7" - pytest-args: "-k 'not test_autocommit_on and not test_turn_autocommit_off_via_default_iso_level and not test_autocommit_isolation_level'" + # - python-version: "pypy-3.9" + # pytest-args: "-k 'not test_autocommit_on and not test_turn_autocommit_off_via_default_iso_level and not test_autocommit_isolation_level'" # add aiosqlite on linux - os: "ubuntu-latest" pytest-args: "--dbdriver pysqlite --dbdriver aiosqlite" @@ -60,11 +63,11 @@ jobs: - os: "macos-latest" architecture: x86 # pypy does not have cext - - python-version: "pypy-3.7" - build-type: "cext" - - os: "windows-latest" - python-version: "pypy-3.7" - architecture: x86 + # - python-version: "pypy-3.9" + # build-type: "cext" + # - os: "windows-latest" + # python-version: "pypy-3.9" + # architecture: x86 fail-fast: false From 4955b6f53fa278614d6fd458a899bcb9b75db675 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 4 Mar 2022 18:27:24 -0500 Subject: [PATCH 140/632] warn for enum length silently ignored the "length" parameter is silently ignored when native_enum is not passed as False. if native_enum is True, a non-native VARCHAR can still be generated. Warn for this silent ignore right now, consider having "length" used in all cases where non-native enum is rendered likely in 2.0. 
Change-Id: Ibceedd4e3aa3926f3268c0c39d94ab73d17a9bdc (cherry picked from commit 18683f474b285b4d7e16c38c0a570276912e1081) --- .../unreleased_14/enum_length_warning.rst | 12 ++++++ lib/sqlalchemy/sql/sqltypes.py | 39 +++++++++++++------ test/sql/test_types.py | 21 +++++++++- 3 files changed, 58 insertions(+), 14 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/enum_length_warning.rst diff --git a/doc/build/changelog/unreleased_14/enum_length_warning.rst b/doc/build/changelog/unreleased_14/enum_length_warning.rst new file mode 100644 index 00000000000..f1dfab64cbf --- /dev/null +++ b/doc/build/changelog/unreleased_14/enum_length_warning.rst @@ -0,0 +1,12 @@ +.. change:: + :tags: bug, sql + + The :class:`_sqltypes.Enum` datatype now emits a warning if the + :paramref:`_sqltypes.Enum.length` argument is specified without also + specifying :paramref:`_sqltypes.Enum.native_enum` as False, as the + parameter is otherwise silently ignored in this case, despite the fact that + the :class:`_sqltypes.Enum` datatype will still render VARCHAR DDL on + backends that don't have a native ENUM datatype such as SQLite. This + behavior may change in a future release so that "length" is honored for all + non-native "enum" types regardless of the "native_enum" setting. + diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index cc3dbffc5de..0b67e293727 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ -1400,8 +1400,9 @@ class was used, its name (converted to lower case) is used by :param native_enum: Use the database's native ENUM type when available. Defaults to True. When False, uses VARCHAR + check - constraint for all backends. The VARCHAR length can be controlled - with :paramref:`.Enum.length` + constraint for all backends. When False, the VARCHAR length can be + controlled with :paramref:`.Enum.length`; currently "length" is + ignored if native_enum=True. 
:param length: Allows specifying a custom length for the VARCHAR when :paramref:`.Enum.native_enum` is False. By default it uses the @@ -1500,7 +1501,7 @@ def _enum_init(self, enums, kw): self._sort_key_function = kw.pop("sort_key_function", NO_ARG) length_arg = kw.pop("length", NO_ARG) self._omit_aliases = kw.pop("omit_aliases", NO_ARG) - + _disable_warnings = kw.pop("_disable_warnings", False) values, objects = self._parse_into_values(enums, kw) self._setup_for_values(values, objects, kw) @@ -1523,14 +1524,24 @@ def _enum_init(self, enums, kw): self._default_length = length = max(len(x) for x in self.enums) else: self._default_length = length = 0 - if not self.native_enum and length_arg is not NO_ARG: - if length_arg < length: - raise ValueError( - "When provided, length must be larger or equal" - " than the length of the longest enum value. %s < %s" - % (length_arg, length) - ) - length = length_arg + + if length_arg is not NO_ARG: + if self.native_enum: + if not _disable_warnings: + util.warn( + "Enum 'length' argument is currently ignored unless " + "native_enum is specified as False, including for DDL " + "that renders VARCHAR in any case. This may change " + "in a future release." + ) + else: + if not _disable_warnings and length_arg < length: + raise ValueError( + "When provided, length must be larger or equal" + " than the length of the longest enum value. %s < %s" + % (length_arg, length) + ) + length = length_arg self._valid_lookup[None] = self._object_lookup[None] = None @@ -1690,12 +1701,15 @@ def as_generic(self, allow_nulltype=False): "an `enums` attribute." 
) - return util.constructor_copy(self, self._generic_type_affinity, *args) + return util.constructor_copy( + self, self._generic_type_affinity, *args, _disable_warnings=True + ) def adapt_to_emulated(self, impltype, **kw): kw.setdefault("_expect_unicode", self._expect_unicode) kw.setdefault("validate_strings", self.validate_strings) kw.setdefault("name", self.name) + kw["_disable_warnings"] = True kw.setdefault("schema", self.schema) kw.setdefault("inherit_schema", self.inherit_schema) kw.setdefault("metadata", self.metadata) @@ -1710,6 +1724,7 @@ def adapt_to_emulated(self, impltype, **kw): def adapt(self, impltype, **kw): kw["_enums"] = self._enums_argument + kw["_disable_warnings"] = True return super(Enum, self).adapt(impltype, **kw) def _should_create_constraint(self, compiler, **kw): diff --git a/test/sql/test_types.py b/test/sql/test_types.py index 9f8b8a662c0..5384230153c 100644 --- a/test/sql/test_types.py +++ b/test/sql/test_types.py @@ -2521,13 +2521,30 @@ def test_repr_three(self): "Enum('x', 'y', native_enum=False, length=255)", ) + def test_repr_four(self): + with expect_warnings( + "Enum 'length' argument is currently ignored unless native_enum" + ): + e = Enum("x", "y", length=255) + # length is currently ignored if native_enum is not False + eq_( + repr(e), + "Enum('x', 'y')", + ) + def test_length_native(self): - e = Enum("x", "y", "long", length=42) + with expect_warnings( + "Enum 'length' argument is currently ignored unless native_enum" + ): + e = Enum("x", "y", "long", length=42) eq_(e.length, len("long")) # no error is raised - e = Enum("x", "y", "long", length=1) + with expect_warnings( + "Enum 'length' argument is currently ignored unless native_enum" + ): + e = Enum("x", "y", "long", length=1) eq_(e.length, len("long")) def test_length_raises(self): From 4f8e2fc8f04e9e668e6825a2f2bf30e70ccd7b3d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 4 Mar 2022 18:30:40 -0500 Subject: [PATCH 141/632] add missing changelog for #7045 was missed 
in d2815b4ac39b42a38dff4cc21ec100c72b9f3cae Fixes: #7045 Change-Id: Id4836690daabf7e547c278a4e538d39579e5f2a2 (cherry picked from commit 825f555a81e55b541ba33e84ee8131d8c0de6d47) --- doc/build/changelog/unreleased_14/7045.rst | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7045.rst diff --git a/doc/build/changelog/unreleased_14/7045.rst b/doc/build/changelog/unreleased_14/7045.rst new file mode 100644 index 00000000000..ca84c5f36c6 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7045.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, tests + :tickets: 7045 + + Made corrections to the default pytest configuration so that test discovery + runs correctly; previously, a configuration error had the effect of + discovery locating the wrong files if a super-directory of the current + directory were named "test". From 03e17631cc6e6e521668ef9deec908d8c7265a0a Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 6 Mar 2022 13:18:59 -0500 Subject: [PATCH 142/632] 1.4.32 changelog edits Change-Id: I3f62ba9c8ff35c8ae1d0bd611db92521da38dff0 --- doc/build/changelog/unreleased_14/7045.rst | 9 +++++---- doc/build/changelog/unreleased_14/7594.rst | 16 ++++++++-------- doc/build/changelog/unreleased_14/7667.rst | 7 ++++--- doc/build/changelog/unreleased_14/7720_7789.rst | 9 +++++---- doc/build/changelog/unreleased_14/7736.rst | 6 +++--- doc/build/changelog/unreleased_14/7738.rst | 6 ++++-- doc/build/conf.py | 1 + 7 files changed, 30 insertions(+), 24 deletions(-) diff --git a/doc/build/changelog/unreleased_14/7045.rst b/doc/build/changelog/unreleased_14/7045.rst index ca84c5f36c6..a3643a43352 100644 --- a/doc/build/changelog/unreleased_14/7045.rst +++ b/doc/build/changelog/unreleased_14/7045.rst @@ -2,7 +2,8 @@ :tags: bug, tests :tickets: 7045 - Made corrections to the default pytest configuration so that test discovery - runs correctly; previously, a configuration error had the effect of - discovery locating the wrong files if a 
super-directory of the current - directory were named "test". + Made corrections to the default pytest configuration regarding how test + discovery is configured, to fix issue where the test suite would not + configure warnings correctly and also attempt to load example suites as + tests, in the specific case where the SQLAlchemy checkout were located in + an absolute path that had a super-directory named "test". diff --git a/doc/build/changelog/unreleased_14/7594.rst b/doc/build/changelog/unreleased_14/7594.rst index 427bac97e31..08f22749ec0 100644 --- a/doc/build/changelog/unreleased_14/7594.rst +++ b/doc/build/changelog/unreleased_14/7594.rst @@ -6,11 +6,11 @@ INSERT silently fails to actually insert a row (such as from a trigger) would not be reached, due to a runtime exception raised ahead of time due to the missing primary key value, thus raising an uninformative exception - rather than the correct one. For 1.4 and above, a new ``FlushError`` is - added for this case that's raised earlier than the previous "null identity" - exception was for 1.3, as a situation where the number of rows actually - INSERTed does not match what was expected is a more critical situation in - 1.4 as it prevents batching of multiple objects from working correctly. - This is separate from the case where a newly fetched primary key is - fetched as NULL, which continues to raise the existing "null identity" - exception. \ No newline at end of file + rather than the correct one. For 1.4 and above, a new + :class:`_ormexc.FlushError` is added for this case that's raised earlier + than the previous "null identity" exception was for 1.3, as a situation + where the number of rows actually INSERTed does not match what was expected + is a more critical situation in 1.4 as it prevents batching of multiple + objects from working correctly. This is separate from the case where a + newly fetched primary key is fetched as NULL, which continues to raise the + existing "null identity" exception. 
\ No newline at end of file diff --git a/doc/build/changelog/unreleased_14/7667.rst b/doc/build/changelog/unreleased_14/7667.rst index 34dcd44b0ba..a66bb123851 100644 --- a/doc/build/changelog/unreleased_14/7667.rst +++ b/doc/build/changelog/unreleased_14/7667.rst @@ -3,13 +3,14 @@ :tickets: 7667 Fixed issue where the :meth:`_asyncio.AsyncSession.execute` method failed - to raise an informative exception if the ``stream_results`` execution + to raise an informative exception if the + :paramref:`_engine.Connection.execution_options.stream_results` execution option were used, which is incompatible with a sync-style :class:`_result.Result` object when using an asyncio calling style, as the operation to fetch more rows would need to be awaited. An exception is now raised in this scenario in the same way one was already raised when the - ``stream_results`` option would be used with the - :meth:`_asyncio.AsyncConnection.execute` method. + :paramref:`_engine.Connection.execution_options.stream_results` option + would be used with the :meth:`_asyncio.AsyncConnection.execute` method. Additionally, for improved stability with state-sensitive database drivers such as asyncmy, the cursor is now closed when this error condition is diff --git a/doc/build/changelog/unreleased_14/7720_7789.rst b/doc/build/changelog/unreleased_14/7720_7789.rst index 5a521fe0103..ddbc327df2b 100644 --- a/doc/build/changelog/unreleased_14/7720_7789.rst +++ b/doc/build/changelog/unreleased_14/7720_7789.rst @@ -2,8 +2,9 @@ :tags: bug, sql, mysql :tickets: 7720, 7789, 7598 - Fixed issues in :class:`_mysql.SET` datatype as well as :class:`.Enum` - where the ``__repr__()`` method would not render all optional parameters in - the string output, impacting the use of these types in Alembic - autogenerate. Pull request for MySQL courtesy Yuki Nishimine. 
+ Fixed issues in MySQL :class:`_mysql.SET` datatype as well as the generic + :class:`.Enum` datatype where the ``__repr__()`` method would not render + all optional parameters in the string output, impacting the use of these + types in Alembic autogenerate. Pull request for MySQL courtesy Yuki + Nishimine. diff --git a/doc/build/changelog/unreleased_14/7736.rst b/doc/build/changelog/unreleased_14/7736.rst index 828dd540a4c..5071961d1c8 100644 --- a/doc/build/changelog/unreleased_14/7736.rst +++ b/doc/build/changelog/unreleased_14/7736.rst @@ -2,8 +2,8 @@ :tags: bug, sqlite :tickets: 7736 - Fixed issue where SQLite unique constraint reflection would not work - for an inline UNIQUE constraint where the column name had an underscore + Fixed issue where SQLite unique constraint reflection would fail to detect + a column-inline UNIQUE constraint where the column name had an underscore in its name. .. change:: @@ -12,5 +12,5 @@ Added support for reflecting SQLite inline unique constraints where the column names are formatted with SQLite "escape quotes" ``[]`` - or `` ` ``, which are discarded by the database when producing the + or `````, which are discarded by the database when producing the column name. diff --git a/doc/build/changelog/unreleased_14/7738.rst b/doc/build/changelog/unreleased_14/7738.rst index dbb028fde38..322ddb458b5 100644 --- a/doc/build/changelog/unreleased_14/7738.rst +++ b/doc/build/changelog/unreleased_14/7738.rst @@ -3,5 +3,7 @@ :tickets: 7738 Fixed regression in mariadbconnector dialect as of mariadb connector 1.0.10 - where the DBAPI no longer pre-buffers cursor.lastrowid. The dialect now - fetches this value proactively for situations where it applies. + where the DBAPI no longer pre-buffers cursor.lastrowid, leading to errors + when inserting objects with the ORM as well as causing non-availability of + the :attr:`_result.CursorResult.inserted_primary_key` attribute. 
The + dialect now fetches this value proactively for situations where it applies. diff --git a/doc/build/conf.py b/doc/build/conf.py index 67a629799b3..e1e798286b5 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -147,6 +147,7 @@ "_row": "sqlalchemy.engine", "_schema": "sqlalchemy.schema", "_types": "sqlalchemy.types", + "_sqltypes": "sqlalchemy.types", "_asyncio": "sqlalchemy.ext.asyncio", "_expression": "sqlalchemy.sql.expression", "_sql": "sqlalchemy.sql.expression", From 293cb14e2198c11e9c8b71454f35c0ff92dc25ab Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 6 Mar 2022 17:28:39 -0500 Subject: [PATCH 143/632] - 1.4.32 --- doc/build/changelog/changelog_14.rst | 204 +++++++++++++++++- doc/build/changelog/unreleased_14/7045.rst | 9 - doc/build/changelog/unreleased_14/7518.rst | 10 - doc/build/changelog/unreleased_14/7594.rst | 16 -- doc/build/changelog/unreleased_14/7599.rst | 16 -- doc/build/changelog/unreleased_14/7600.rst | 12 -- doc/build/changelog/unreleased_14/7612.rst | 11 - doc/build/changelog/unreleased_14/7667.rst | 19 -- doc/build/changelog/unreleased_14/7676.rst | 8 - doc/build/changelog/unreleased_14/7697.rst | 8 - .../changelog/unreleased_14/7720_7789.rst | 10 - doc/build/changelog/unreleased_14/7721.rst | 7 - doc/build/changelog/unreleased_14/7736.rst | 16 -- doc/build/changelog/unreleased_14/7738.rst | 9 - doc/build/changelog/unreleased_14/7748.rst | 7 - doc/build/changelog/unreleased_14/7760.rst | 9 - .../unreleased_14/async_no_event.rst | 6 - .../unreleased_14/enum_length_warning.rst | 12 -- doc/build/conf.py | 4 +- 19 files changed, 205 insertions(+), 188 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/7045.rst delete mode 100644 doc/build/changelog/unreleased_14/7518.rst delete mode 100644 doc/build/changelog/unreleased_14/7594.rst delete mode 100644 doc/build/changelog/unreleased_14/7599.rst delete mode 100644 doc/build/changelog/unreleased_14/7600.rst delete mode 100644 doc/build/changelog/unreleased_14/7612.rst 
delete mode 100644 doc/build/changelog/unreleased_14/7667.rst delete mode 100644 doc/build/changelog/unreleased_14/7676.rst delete mode 100644 doc/build/changelog/unreleased_14/7697.rst delete mode 100644 doc/build/changelog/unreleased_14/7720_7789.rst delete mode 100644 doc/build/changelog/unreleased_14/7721.rst delete mode 100644 doc/build/changelog/unreleased_14/7736.rst delete mode 100644 doc/build/changelog/unreleased_14/7738.rst delete mode 100644 doc/build/changelog/unreleased_14/7748.rst delete mode 100644 doc/build/changelog/unreleased_14/7760.rst delete mode 100644 doc/build/changelog/unreleased_14/async_no_event.rst delete mode 100644 doc/build/changelog/unreleased_14/enum_length_warning.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 79102dd3e29..eacb83edfd4 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,209 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.32 - :include_notes_from: unreleased_14 + :released: March 6, 2022 + + .. change:: + :tags: bug, sql + :tickets: 7721 + + Fixed type-related error messages that would fail for values that were + tuples, due to string formatting syntax, including compile of unsupported + literal values and invalid boolean values. + + .. change:: + :tags: bug, sql, mysql + :tickets: 7720, 7789, 7598 + + Fixed issues in MySQL :class:`_mysql.SET` datatype as well as the generic + :class:`.Enum` datatype where the ``__repr__()`` method would not render + all optional parameters in the string output, impacting the use of these + types in Alembic autogenerate. Pull request for MySQL courtesy Yuki + Nishimine. + + + .. change:: + :tags: bug, sqlite + :tickets: 7736 + + Fixed issue where SQLite unique constraint reflection would fail to detect + a column-inline UNIQUE constraint where the column name had an underscore + in its name. + + .. 
change:: + :tags: usecase, sqlite + :tickets: 7736 + + Added support for reflecting SQLite inline unique constraints where + the column names are formatted with SQLite "escape quotes" ``[]`` + or `````, which are discarded by the database when producing the + column name. + + .. change:: + :tags: bug, oracle + :tickets: 7676 + + Fixed issue in Oracle dialect where using a column name that requires + quoting when written as a bound parameter, such as ``"_id"``, would not + correctly track a Python generated default value due to the bound-parameter + rewriting missing this value, causing an Oracle error to be raised. + + .. change:: + :tags: bug, tests + :tickets: 7599 + + Improvements to the test suite's integration with pytest such that the + "warnings" plugin, if manually enabled, will not interfere with the test + suite, such that third parties can enable the warnings plugin or make use + of the ``-W`` parameter and SQLAlchemy's test suite will continue to pass. + Additionally, modernized the detection of the "pytest-xdist" plugin so that + plugins can be globally disabled using PYTEST_DISABLE_PLUGIN_AUTOLOAD=1 + without breaking the test suite if xdist were still installed. Warning + filters that promote deprecation warnings to errors are now localized to + SQLAlchemy-specific warnings, or within SQLAlchemy-specific sources for + general Python deprecation warnings, so that non-SQLAlchemy deprecation + warnings emitted from pytest plugins should also not impact the test suite. + + + .. change:: + :tags: bug, sql + + The :class:`_sqltypes.Enum` datatype now emits a warning if the + :paramref:`_sqltypes.Enum.length` argument is specified without also + specifying :paramref:`_sqltypes.Enum.native_enum` as False, as the + parameter is otherwise silently ignored in this case, despite the fact that + the :class:`_sqltypes.Enum` datatype will still render VARCHAR DDL on + backends that don't have a native ENUM datatype such as SQLite. 
This + behavior may change in a future release so that "length" is honored for all + non-native "enum" types regardless of the "native_enum" setting. + + + .. change:: + :tags: bug, mysql, regression + :tickets: 7518 + + Fixed regression caused by :ticket:`7518` where changing the syntax "SHOW + VARIABLES" to "SELECT @@" broke compatibility with MySQL versions older + than 5.6, including early 5.0 releases. While these are very old MySQL + versions, a change in compatibility was not planned, so version-specific + logic has been restored to fall back to "SHOW VARIABLES" for MySQL server + versions < 5.6. + + .. change:: + :tags: bug, asyncio + + Fixed issues where a descriptive error message was not raised for some + classes of event listening with an async engine, which should instead be a + sync engine instance. + + .. change:: + :tags: bug, mariadb, regression + :tickets: 7738 + + Fixed regression in mariadbconnector dialect as of mariadb connector 1.0.10 + where the DBAPI no longer pre-buffers cursor.lastrowid, leading to errors + when inserting objects with the ORM as well as causing non-availability of + the :attr:`_result.CursorResult.inserted_primary_key` attribute. The + dialect now fetches this value proactively for situations where it applies. + + .. change:: + :tags: usecase, postgresql + :tickets: 7600 + + Added compiler support for the PostgreSQL ``NOT VALID`` phrase when rendering + DDL for the :class:`.CheckConstraint`, :class:`.ForeignKeyConstraint` + and :class:`.ForeignKey` schema constructs. Pull request courtesy + Gilbert Gilb's. + + .. seealso:: + + :ref:`postgresql_constraint_options` + + .. 
change:: + :tags: bug, orm, regression + :tickets: 7594 + + Fixed regression where the ORM exception that is to be raised when an + INSERT silently fails to actually insert a row (such as from a trigger) + would not be reached, due to a runtime exception raised ahead of time due + to the missing primary key value, thus raising an uninformative exception + rather than the correct one. For 1.4 and above, a new + :class:`_ormexc.FlushError` is added for this case that's raised earlier + than the previous "null identity" exception was for 1.3, as a situation + where the number of rows actually INSERTed does not match what was expected + is a more critical situation in 1.4 as it prevents batching of multiple + objects from working correctly. This is separate from the case where a + newly fetched primary key is fetched as NULL, which continues to raise the + existing "null identity" exception. + + .. change:: + :tags: bug, tests + :tickets: 7045 + + Made corrections to the default pytest configuration regarding how test + discovery is configured, to fix issue where the test suite would not + configure warnings correctly and also attempt to load example suites as + tests, in the specific case where the SQLAlchemy checkout were located in + an absolute path that had a super-directory named "test". + + .. change:: + :tags: bug, orm + :tickets: 7697 + + Fixed issue where using a fully qualified path for the classname in + :func:`_orm.relationship` that nonetheless contained an incorrect name for + path tokens that were not the first token, would fail to raise an + informative error and would instead fail randomly at a later step. + + .. change:: + :tags: bug, oracle, regression + :tickets: 7748 + + Added support to parse "DPI" error codes from cx_Oracle exception objects + such as ``DPI-1080`` and ``DPI-1010``, both of which now indicate a + disconnect scenario as of cx_Oracle 8.3. + + .. 
change:: + :tags: bug, sql + :tickets: 7760 + + Fixed issue where the :meth:`.HasCTE.add_cte` method as called upon a + :class:`.TextualSelect` instance was not being accommodated by the SQL + compiler. The fix additionally adds more "SELECT"-like compiler behavior to + :class:`.TextualSelect` including that DML CTEs such as UPDATE and INSERT + may be accommodated. + + .. change:: + :tags: bug, engine + :tickets: 7612 + + Adjusted the logging for key SQLAlchemy components including + :class:`_engine.Engine`, :class:`_engine.Connection` to establish an + appropriate stack level parameter, so that the Python logging tokens + ``funcName`` and ``lineno`` when used in custom logging formatters will + report the correct information, which can be useful when filtering log + output; supported on Python 3.8 and above. Pull request courtesy Markus + Gerstel. + + .. change:: + :tags: bug, asyncio + :tickets: 7667 + + Fixed issue where the :meth:`_asyncio.AsyncSession.execute` method failed + to raise an informative exception if the + :paramref:`_engine.Connection.execution_options.stream_results` execution + option were used, which is incompatible with a sync-style + :class:`_result.Result` object when using an asyncio calling style, as the + operation to fetch more rows would need to be awaited. An exception is now + raised in this scenario in the same way one was already raised when the + :paramref:`_engine.Connection.execution_options.stream_results` option + would be used with the :meth:`_asyncio.AsyncConnection.execute` method. + + Additionally, for improved stability with state-sensitive database drivers + such as asyncmy, the cursor is now closed when this error condition is + raised; previously with the asyncmy dialect, the connection would go into + an invalid state with unconsumed server side results remaining. + .. 
changelog:: :version: 1.4.31 diff --git a/doc/build/changelog/unreleased_14/7045.rst b/doc/build/changelog/unreleased_14/7045.rst deleted file mode 100644 index a3643a43352..00000000000 --- a/doc/build/changelog/unreleased_14/7045.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, tests - :tickets: 7045 - - Made corrections to the default pytest configuration regarding how test - discovery is configured, to fix issue where the test suite would not - configure warnings correctly and also attempt to load example suites as - tests, in the specific case where the SQLAlchemy checkout were located in - an absolute path that had a super-directory named "test". diff --git a/doc/build/changelog/unreleased_14/7518.rst b/doc/build/changelog/unreleased_14/7518.rst deleted file mode 100644 index bb5a9bc21b7..00000000000 --- a/doc/build/changelog/unreleased_14/7518.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. change:: - :tags: bug, mysql, regression - :tickets: 7518 - - Fixed regression caused by :ticket:`7518` where changing the syntax "SHOW - VARIABLES" to "SELECT @@" broke compatibility with MySQL versions older - than 5.6, including early 5.0 releases. While these are very old MySQL - versions, a change in compatibility was not planned, so version-specific - logic has been restored to fall back to "SHOW VARIABLES" for MySQL server - versions < 5.6. diff --git a/doc/build/changelog/unreleased_14/7594.rst b/doc/build/changelog/unreleased_14/7594.rst deleted file mode 100644 index 08f22749ec0..00000000000 --- a/doc/build/changelog/unreleased_14/7594.rst +++ /dev/null @@ -1,16 +0,0 @@ -.. change:: - :tags: bug, orm, regression - :tickets: 7594 - - Fixed regression where the ORM exception that is to be raised when an - INSERT silently fails to actually insert a row (such as from a trigger) - would not be reached, due to a runtime exception raised ahead of time due - to the missing primary key value, thus raising an uninformative exception - rather than the correct one. 
For 1.4 and above, a new - :class:`_ormexc.FlushError` is added for this case that's raised earlier - than the previous "null identity" exception was for 1.3, as a situation - where the number of rows actually INSERTed does not match what was expected - is a more critical situation in 1.4 as it prevents batching of multiple - objects from working correctly. This is separate from the case where a - newly fetched primary key is fetched as NULL, which continues to raise the - existing "null identity" exception. \ No newline at end of file diff --git a/doc/build/changelog/unreleased_14/7599.rst b/doc/build/changelog/unreleased_14/7599.rst deleted file mode 100644 index db69ace4663..00000000000 --- a/doc/build/changelog/unreleased_14/7599.rst +++ /dev/null @@ -1,16 +0,0 @@ -.. change:: - :tags: bug, tests - :tickets: 7599 - - Improvements to the test suite's integration with pytest such that the - "warnings" plugin, if manually enabled, will not interfere with the test - suite, such that third parties can enable the warnings plugin or make use - of the ``-W`` parameter and SQLAlchemy's test suite will continue to pass. - Additionally, modernized the detection of the "pytest-xdist" plugin so that - plugins can be globally disabled using PYTEST_DISABLE_PLUGIN_AUTOLOAD=1 - without breaking the test suite if xdist were still installed. Warning - filters that promote deprecation warnings to errors are now localized to - SQLAlchemy-specific warnings, or within SQLAlchemy-specific sources for - general Python deprecation warnings, so that non-SQLAlchemy deprecation - warnings emitted from pytest plugins should also not impact the test suite. - diff --git a/doc/build/changelog/unreleased_14/7600.rst b/doc/build/changelog/unreleased_14/7600.rst deleted file mode 100644 index 2f843ea1988..00000000000 --- a/doc/build/changelog/unreleased_14/7600.rst +++ /dev/null @@ -1,12 +0,0 @@ -.. 
change:: - :tags: usecase, postgresql - :tickets: 7600 - - Added compiler support for the PostgreSQL ``NOT VALID`` phrase when rendering - DDL for the :class:`.CheckConstraint`, :class:`.ForeignKeyConstraint` - and :class:`.ForeignKey` schema constructs. Pull request courtesy - Gilbert Gilb's. - - .. seealso:: - - :ref:`postgresql_constraint_options` diff --git a/doc/build/changelog/unreleased_14/7612.rst b/doc/build/changelog/unreleased_14/7612.rst deleted file mode 100644 index c8992045fcf..00000000000 --- a/doc/build/changelog/unreleased_14/7612.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. change:: - :tags: bug, engine - :tickets: 7612 - - Adjusted the logging for key SQLAlchemy components including - :class:`_engine.Engine`, :class:`_engine.Connection` to establish an - appropriate stack level parameter, so that the Python logging tokens - ``funcName`` and ``lineno`` when used in custom logging formatters will - report the correct information, which can be useful when filtering log - output; supported on Python 3.8 and above. Pull request courtesy Markus - Gerstel. diff --git a/doc/build/changelog/unreleased_14/7667.rst b/doc/build/changelog/unreleased_14/7667.rst deleted file mode 100644 index a66bb123851..00000000000 --- a/doc/build/changelog/unreleased_14/7667.rst +++ /dev/null @@ -1,19 +0,0 @@ -.. change:: - :tags: bug, asyncio - :tickets: 7667 - - Fixed issue where the :meth:`_asyncio.AsyncSession.execute` method failed - to raise an informative exception if the - :paramref:`_engine.Connection.execution_options.stream_results` execution - option were used, which is incompatible with a sync-style - :class:`_result.Result` object when using an asyncio calling style, as the - operation to fetch more rows would need to be awaited. 
An exception is now - raised in this scenario in the same way one was already raised when the - :paramref:`_engine.Connection.execution_options.stream_results` option - would be used with the :meth:`_asyncio.AsyncConnection.execute` method. - - Additionally, for improved stability with state-sensitive database drivers - such as asyncmy, the cursor is now closed when this error condition is - raised; previously with the asyncmy dialect, the connection would go into - an invalid state with unconsumed server side results remaining. - diff --git a/doc/build/changelog/unreleased_14/7676.rst b/doc/build/changelog/unreleased_14/7676.rst deleted file mode 100644 index ec6275fb40c..00000000000 --- a/doc/build/changelog/unreleased_14/7676.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, oracle - :tickets: 7676 - - Fixed issue in Oracle dialect where using a column name that requires - quoting when written as a bound parameter, such as ``"_id"``, would not - correctly track a Python generated default value due to the bound-parameter - rewriting missing this value, causing an Oracle error to be raised. diff --git a/doc/build/changelog/unreleased_14/7697.rst b/doc/build/changelog/unreleased_14/7697.rst deleted file mode 100644 index 03b318cce2a..00000000000 --- a/doc/build/changelog/unreleased_14/7697.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 7697 - - Fixed issue where using a fully qualified path for the classname in - :func:`_orm.relationship` that nonetheless contained an incorrect name for - path tokens that were not the first token, would fail to raise an - informative error and would instead fail randomly at a later step. diff --git a/doc/build/changelog/unreleased_14/7720_7789.rst b/doc/build/changelog/unreleased_14/7720_7789.rst deleted file mode 100644 index ddbc327df2b..00000000000 --- a/doc/build/changelog/unreleased_14/7720_7789.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. 
change:: - :tags: bug, sql, mysql - :tickets: 7720, 7789, 7598 - - Fixed issues in MySQL :class:`_mysql.SET` datatype as well as the generic - :class:`.Enum` datatype where the ``__repr__()`` method would not render - all optional parameters in the string output, impacting the use of these - types in Alembic autogenerate. Pull request for MySQL courtesy Yuki - Nishimine. - diff --git a/doc/build/changelog/unreleased_14/7721.rst b/doc/build/changelog/unreleased_14/7721.rst deleted file mode 100644 index d719e223342..00000000000 --- a/doc/build/changelog/unreleased_14/7721.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 7721 - - Fixed type-related error messages that would fail for values that were - tuples, due to string formatting syntax, including compile of unsupported - literal values and invalid boolean values. diff --git a/doc/build/changelog/unreleased_14/7736.rst b/doc/build/changelog/unreleased_14/7736.rst deleted file mode 100644 index 5071961d1c8..00000000000 --- a/doc/build/changelog/unreleased_14/7736.rst +++ /dev/null @@ -1,16 +0,0 @@ -.. change:: - :tags: bug, sqlite - :tickets: 7736 - - Fixed issue where SQLite unique constraint reflection would fail to detect - a column-inline UNIQUE constraint where the column name had an underscore - in its name. - -.. change:: - :tags: usecase, sqlite - :tickets: 7736 - - Added support for reflecting SQLite inline unique constraints where - the column names are formatted with SQLite "escape quotes" ``[]`` - or `````, which are discarded by the database when producing the - column name. diff --git a/doc/build/changelog/unreleased_14/7738.rst b/doc/build/changelog/unreleased_14/7738.rst deleted file mode 100644 index 322ddb458b5..00000000000 --- a/doc/build/changelog/unreleased_14/7738.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. 
change:: - :tags: bug, mariadb, regression - :tickets: 7738 - - Fixed regression in mariadbconnector dialect as of mariadb connector 1.0.10 - where the DBAPI no longer pre-buffers cursor.lastrowid, leading to errors - when inserting objects with the ORM as well as causing non-availability of - the :attr:`_result.CursorResult.inserted_primary_key` attribute. The - dialect now fetches this value proactively for situations where it applies. diff --git a/doc/build/changelog/unreleased_14/7748.rst b/doc/build/changelog/unreleased_14/7748.rst deleted file mode 100644 index d9d6bf23613..00000000000 --- a/doc/build/changelog/unreleased_14/7748.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: bug, oracle, regression - :tickets: 7748 - - Added support to parse "DPI" error codes from cx_Oracle exception objects - such as ``DPI-1080`` and ``DPI-1010``, both of which now indicate a - disconnect scenario as of cx_Oracle 8.3. diff --git a/doc/build/changelog/unreleased_14/7760.rst b/doc/build/changelog/unreleased_14/7760.rst deleted file mode 100644 index 2f0d403dd85..00000000000 --- a/doc/build/changelog/unreleased_14/7760.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 7760 - - Fixed issue where the :meth:`.HasCTE.add_cte` method as called upon a - :class:`.TextualSelect` instance was not being accommodated by the SQL - compiler. The fix additionally adds more "SELECT"-like compiler behavior to - :class:`.TextualSelect` including that DML CTEs such as UPDATE and INSERT - may be accommodated. diff --git a/doc/build/changelog/unreleased_14/async_no_event.rst b/doc/build/changelog/unreleased_14/async_no_event.rst deleted file mode 100644 index 8deda89453d..00000000000 --- a/doc/build/changelog/unreleased_14/async_no_event.rst +++ /dev/null @@ -1,6 +0,0 @@ -.. 
change:: - :tags: bug, asyncio - - Fixed issues where a descriptive error message was not raised for some - classes of event listening with an async engine, which should instead be a - sync engine instance. \ No newline at end of file diff --git a/doc/build/changelog/unreleased_14/enum_length_warning.rst b/doc/build/changelog/unreleased_14/enum_length_warning.rst deleted file mode 100644 index f1dfab64cbf..00000000000 --- a/doc/build/changelog/unreleased_14/enum_length_warning.rst +++ /dev/null @@ -1,12 +0,0 @@ -.. change:: - :tags: bug, sql - - The :class:`_sqltypes.Enum` datatype now emits a warning if the - :paramref:`_sqltypes.Enum.length` argument is specified without also - specifying :paramref:`_sqltypes.Enum.native_enum` as False, as the - parameter is otherwise silently ignored in this case, despite the fact that - the :class:`_sqltypes.Enum` datatype will still render VARCHAR DDL on - backends that don't have a native ENUM datatype such as SQLite. This - behavior may change in a future release so that "length" is honored for all - non-native "enum" types regardless of the "native_enum" setting. - diff --git a/doc/build/conf.py b/doc/build/conf.py index e1e798286b5..f29349c7955 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -206,9 +206,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. 
-release = "1.4.31" +release = "1.4.32" -release_date = "January 20, 2022" +release_date = "March 6, 2022" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From de95dc4ce5ed44cc63d9fd8b2e00a78858a73d2a Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 6 Mar 2022 17:43:26 -0500 Subject: [PATCH 144/632] Version 1.4.33 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index eacb83edfd4..fa70f560645 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.33 + :include_notes_from: unreleased_14 + .. changelog:: :version: 1.4.32 :released: March 6, 2022 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index 9fb36951323..fc34b3ab262 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.32" +__version__ = "1.4.33" def __go(lcls): From a8102ba496c4c11eae6b904a962cf352902f0de7 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 7 Mar 2022 11:17:47 -0500 Subject: [PATCH 145/632] test sqlite w/ savepoint workaround in session fixture test Fixes: #7795 Change-Id: Ib790581555656c088f86c00080c70d19ca295a03 (cherry picked from commit fbacb1991585202a5bf22acb0d36b5c979bcfad8) --- lib/sqlalchemy/testing/engines.py | 14 ++++++++++++++ test/orm/test_transaction.py | 12 ++++++------ test/requirements.py | 10 ++++++++++ 3 files changed, 30 insertions(+), 6 deletions(-) diff --git a/lib/sqlalchemy/testing/engines.py b/lib/sqlalchemy/testing/engines.py index a92d476ac54..b8be6b9bd55 100644 --- a/lib/sqlalchemy/testing/engines.py +++ 
b/lib/sqlalchemy/testing/engines.py @@ -276,10 +276,12 @@ def testing_engine( future=None, asyncio=False, transfer_staticpool=False, + _sqlite_savepoint=False, ): """Produce an engine configured by --options with optional overrides.""" if asyncio: + assert not _sqlite_savepoint from sqlalchemy.ext.asyncio import ( create_async_engine as create_engine, ) @@ -294,9 +296,11 @@ def testing_engine( if not options: use_reaper = True scope = "function" + sqlite_savepoint = False else: use_reaper = options.pop("use_reaper", True) scope = options.pop("scope", "function") + sqlite_savepoint = options.pop("sqlite_savepoint", False) url = url or config.db.url @@ -312,6 +316,16 @@ def testing_engine( engine = create_engine(url, **options) + if sqlite_savepoint and engine.name == "sqlite": + # apply SQLite savepoint workaround + @event.listens_for(engine, "connect") + def do_connect(dbapi_connection, connection_record): + dbapi_connection.isolation_level = None + + @event.listens_for(engine, "begin") + def do_begin(conn): + conn.exec_driver_sql("BEGIN") + if transfer_staticpool: from sqlalchemy.pool import StaticPool diff --git a/test/orm/test_transaction.py b/test/orm/test_transaction.py index 603ec079a76..e077220e19b 100644 --- a/test/orm/test_transaction.py +++ b/test/orm/test_transaction.py @@ -2526,10 +2526,10 @@ def test_key_replaced_by_oob_insert(self): class JoinIntoAnExternalTransactionFixture(object): """Test the "join into an external transaction" examples""" - __leave_connections_for_teardown__ = True - def setup_test(self): - self.engine = testing.db + self.engine = engines.testing_engine( + options={"use_reaper": False, "sqlite_savepoint": True} + ) self.connection = self.engine.connect() self.metadata = MetaData() @@ -2590,7 +2590,7 @@ class A(object): # bind an individual Session to the connection self.session = Session(bind=self.connection, future=True) - if testing.requires.savepoints.enabled: + if testing.requires.compat_savepoints.enabled: self.nested = 
self.connection.begin_nested() @event.listens_for(self.session, "after_transaction_end") @@ -2607,7 +2607,7 @@ def teardown_session(self): if self.trans.is_active: self.trans.rollback() - @testing.requires.savepoints + @testing.requires.compat_savepoints def test_something_with_context_managers(self): A = self.A @@ -2673,7 +2673,7 @@ class A(object): # bind an individual Session to the connection self.session = Session(bind=self.connection) - if testing.requires.savepoints.enabled: + if testing.requires.compat_savepoints.enabled: # start the session in a SAVEPOINT... self.session.begin_nested() diff --git a/test/requirements.py b/test/requirements.py index 1780e3b21a2..4c9ac40c54d 100644 --- a/test/requirements.py +++ b/test/requirements.py @@ -558,6 +558,16 @@ def savepoints(self): "savepoints not supported", ) + @property + def compat_savepoints(self): + """Target database must support savepoints, or a compat + recipe e.g. for sqlite will be used""" + + return skip_if( + ["sybase", ("mysql", "<", (5, 0, 3))], + "savepoints not supported", + ) + @property def savepoints_w_release(self): return self.savepoints + skip_if( From 0aba7482241a32694f0b478d1a99e7821d50a34f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 7 Mar 2022 15:11:29 -0500 Subject: [PATCH 146/632] support selectin_polymorphic w/ no fixed polymorphic_on Fixed issue where the :func:`_orm.polymorphic_selectin` loader option would not work with joined inheritance mappers that don't have a fixed "polymorphic_on" column. Additionally added test support for a wider variety of usage patterns with this construct. Fixed bug where :func:`_orm.composite` attributes would not work in conjunction with the :func:`_orm.selectin_polymorphic` loader strategy for joined table inheritance. 
Fixes: #7799 Fixes: #7801 Change-Id: I7cfe32dfe844b188403b39545930c0aee71d0119 (cherry picked from commit 47509a2916ad1ca09b62d44ffabd66e7bda1827d) --- doc/build/changelog/unreleased_14/7799.rst | 8 + doc/build/changelog/unreleased_14/7801.rst | 8 + lib/sqlalchemy/orm/mapper.py | 11 +- test/orm/inheritance/test_poly_loading.py | 342 ++++++++++++++++++--- 4 files changed, 320 insertions(+), 49 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7799.rst create mode 100644 doc/build/changelog/unreleased_14/7801.rst diff --git a/doc/build/changelog/unreleased_14/7799.rst b/doc/build/changelog/unreleased_14/7799.rst new file mode 100644 index 00000000000..00254738ee9 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7799.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, orm + :tickets: 7799 + + Fixed issue where the :func:`_orm.polymorphic_selectin` loader option would + not work with joined inheritance mappers that don't have a fixed + "polymorphic_on" column. Additionally added test support for a wider + variety of usage patterns with this construct. \ No newline at end of file diff --git a/doc/build/changelog/unreleased_14/7801.rst b/doc/build/changelog/unreleased_14/7801.rst new file mode 100644 index 00000000000..4df3bdf8764 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7801.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, orm + :tickets: 7801 + + Fixed bug where :func:`_orm.composite` attributes would not work in + conjunction with the :func:`_orm.selectin_polymorphic` loader strategy for + joined table inheritance. + diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index b12ade59c33..967c1064ad3 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -37,6 +37,7 @@ from .interfaces import MapperProperty from .interfaces import ORMEntityColumnsClauseRole from .interfaces import ORMFromClauseRole +from .interfaces import StrategizedProperty from .path_registry import PathRegistry from .. 
import event from .. import exc as sa_exc @@ -3089,8 +3090,11 @@ def _subclass_load_via_in(self, entity): assert self.inherits - polymorphic_prop = self._columntoproperty[self.polymorphic_on] - keep_props = set([polymorphic_prop] + self._identity_key_props) + if self.polymorphic_on is not None: + polymorphic_prop = self._columntoproperty[self.polymorphic_on] + keep_props = set([polymorphic_prop] + self._identity_key_props) + else: + keep_props = set(self._identity_key_props) disable_opt = strategy_options.Load(entity) enable_opt = strategy_options.Load(entity) @@ -3099,6 +3103,9 @@ def _subclass_load_via_in(self, entity): if prop.parent is self or prop in keep_props: # "enable" options, to turn on the properties that we want to # load by default (subject to options from the query) + if not isinstance(prop, StrategizedProperty): + continue + enable_opt.set_generic_strategy( # convert string name to an attribute before passing # to loader strategy diff --git a/test/orm/inheritance/test_poly_loading.py b/test/orm/inheritance/test_poly_loading.py index 1a1838ad68b..1e3b15575d7 100644 --- a/test/orm/inheritance/test_poly_loading.py +++ b/test/orm/inheritance/test_poly_loading.py @@ -2,10 +2,13 @@ from sqlalchemy import ForeignKey from sqlalchemy import inspect from sqlalchemy import Integer +from sqlalchemy import literal from sqlalchemy import select from sqlalchemy import String from sqlalchemy import testing +from sqlalchemy import union from sqlalchemy.orm import backref +from sqlalchemy.orm import composite from sqlalchemy.orm import defaultload from sqlalchemy.orm import immediateload from sqlalchemy.orm import joinedload @@ -26,6 +29,7 @@ from sqlalchemy.testing.assertsql import CompiledSQL from sqlalchemy.testing.assertsql import EachOf from sqlalchemy.testing.assertsql import Or +from sqlalchemy.testing.entities import ComparableEntity from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.schema import Column from ._poly_fixtures 
import _Polymorphic @@ -423,7 +427,10 @@ def test_threelevel_selectin_to_inline_options(self): with self.assert_statement_count(testing.db, 0): eq_(result, [d(d_data="d1"), e(e_data="e1")]) - def test_threelevel_selectin_to_inline_awkward_alias_options(self): + @testing.combinations((True,), (False,)) + def test_threelevel_selectin_to_inline_awkward_alias_options( + self, use_aliased_class + ): self._fixture_from_geometry( { "a": { @@ -454,57 +461,99 @@ def test_threelevel_selectin_to_inline_awkward_alias_options(self): ) c_alias = with_polymorphic(c, (d, e), poly) - q = ( - sess.query(a) - .options(selectin_polymorphic(a, [b, c_alias])) - .order_by(a.id) - ) - result = self.assert_sql_execution( - testing.db, - q.all, - CompiledSQL( - "SELECT a.type AS a_type, a.id AS a_id, " - "a.a_data AS a_a_data FROM a ORDER BY a.id", - {}, - ), - Or( - # here, the test is that the adaptation of "a" takes place + if use_aliased_class: + opt = selectin_polymorphic(a, [b, c_alias]) + else: + opt = selectin_polymorphic( + a, + [b, c_alias, d, e], + ) + q = sess.query(a).options(opt).order_by(a.id) + + if use_aliased_class: + result = self.assert_sql_execution( + testing.db, + q.all, CompiledSQL( - "SELECT poly.a_type AS poly_a_type, " - "poly.c_id AS poly_c_id, " - "poly.a_id AS poly_a_id, poly.c_c_data AS poly_c_c_data, " - "poly.e_id AS poly_e_id, poly.e_e_data AS poly_e_e_data, " - "poly.d_id AS poly_d_id, poly.d_d_data AS poly_d_d_data " - "FROM (SELECT a.id AS a_id, a.type AS a_type, " - "c.id AS c_id, " - "c.c_data AS c_c_data, d.id AS d_id, " - "d.d_data AS d_d_data, " - "e.id AS e_id, e.e_data AS e_e_data FROM a JOIN c " - "ON a.id = c.id LEFT OUTER JOIN d ON c.id = d.id " - "LEFT OUTER JOIN e ON c.id = e.id) AS poly " - "WHERE poly.a_id IN (__[POSTCOMPILE_primary_keys]) " - "ORDER BY poly.a_id", - [{"primary_keys": [1, 2]}], + "SELECT a.type AS a_type, a.id AS a_id, " + "a.a_data AS a_a_data FROM a ORDER BY a.id", + {}, + ), + Or( + # here, the test is that the 
adaptation of "a" takes place + CompiledSQL( + "SELECT poly.a_type AS poly_a_type, " + "poly.c_id AS poly_c_id, " + "poly.a_id AS poly_a_id, poly.c_c_data " + "AS poly_c_c_data, " + "poly.e_id AS poly_e_id, poly.e_e_data " + "AS poly_e_e_data, " + "poly.d_id AS poly_d_id, poly.d_d_data " + "AS poly_d_d_data " + "FROM (SELECT a.id AS a_id, a.type AS a_type, " + "c.id AS c_id, " + "c.c_data AS c_c_data, d.id AS d_id, " + "d.d_data AS d_d_data, " + "e.id AS e_id, e.e_data AS e_e_data FROM a JOIN c " + "ON a.id = c.id LEFT OUTER JOIN d ON c.id = d.id " + "LEFT OUTER JOIN e ON c.id = e.id) AS poly " + "WHERE poly.a_id IN (__[POSTCOMPILE_primary_keys]) " + "ORDER BY poly.a_id", + [{"primary_keys": [1, 2]}], + ), + CompiledSQL( + "SELECT poly.a_type AS poly_a_type, " + "poly.c_id AS poly_c_id, " + "poly.a_id AS poly_a_id, " + "poly.c_c_data AS poly_c_c_data, " + "poly.d_id AS poly_d_id, poly.d_d_data " + "AS poly_d_d_data, " + "poly.e_id AS poly_e_id, poly.e_e_data " + "AS poly_e_e_data " + "FROM (SELECT a.id AS a_id, a.type AS a_type, " + "c.id AS c_id, c.c_data AS c_c_data, d.id AS d_id, " + "d.d_data AS d_d_data, e.id AS e_id, " + "e.e_data AS e_e_data FROM a JOIN c ON a.id = c.id " + "LEFT OUTER JOIN d ON c.id = d.id " + "LEFT OUTER JOIN e ON c.id = e.id) AS poly " + "WHERE poly.a_id IN (__[POSTCOMPILE_primary_keys]) " + "ORDER BY poly.a_id", + [{"primary_keys": [1, 2]}], + ), ), + ) + else: + result = self.assert_sql_execution( + testing.db, + q.all, CompiledSQL( - "SELECT poly.a_type AS poly_a_type, " - "poly.c_id AS poly_c_id, " - "poly.a_id AS poly_a_id, poly.c_c_data AS poly_c_c_data, " - "poly.d_id AS poly_d_id, poly.d_d_data AS poly_d_d_data, " - "poly.e_id AS poly_e_id, poly.e_e_data AS poly_e_e_data " - "FROM (SELECT a.id AS a_id, a.type AS a_type, " - "c.id AS c_id, c.c_data AS c_c_data, d.id AS d_id, " - "d.d_data AS d_d_data, e.id AS e_id, " - "e.e_data AS e_e_data FROM a JOIN c ON a.id = c.id " - "LEFT OUTER JOIN d ON c.id = d.id " - "LEFT OUTER JOIN e ON 
c.id = e.id) AS poly " - "WHERE poly.a_id IN (__[POSTCOMPILE_primary_keys]) " - "ORDER BY poly.a_id", - [{"primary_keys": [1, 2]}], + "SELECT a.type AS a_type, a.id AS a_id, " + "a.a_data AS a_a_data FROM a ORDER BY a.id", + {}, ), - ), - ) + AllOf( + CompiledSQL( + "SELECT a.type AS a_type, d.id AS d_id, c.id AS c_id, " + "a.id AS a_id, " + "d.d_data AS d_d_data FROM a " + "JOIN c ON a.id = c.id JOIN d ON c.id = d.id " + "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " + "ORDER BY a.id", + [{"primary_keys": [1]}], + ), + CompiledSQL( + "SELECT a.type AS a_type, e.id AS e_id, c.id AS c_id, " + "a.id AS a_id, " + "e.e_data AS e_e_data FROM a " + "JOIN c ON a.id = c.id JOIN e ON c.id = e.id " + "WHERE a.id IN (__[POSTCOMPILE_primary_keys]) " + "ORDER BY a.id", + [{"primary_keys": [2]}], + ), + ), + ) + with self.assert_statement_count(testing.db, 0): eq_(result, [d(d_data="d1"), e(e_data="e1")]) @@ -923,3 +972,202 @@ class AnyOpt(CompileStateOption): u = sess.execute(select(User).options(*opts)).scalars().one() address = u.address eq_(inspect(address).load_options, set(opts)) + + +class NoBaseWPPlusAliasedTest( + testing.AssertsExecutionResults, fixtures.TestBase +): + """test for #7799""" + + @testing.fixture + def mapping_fixture(self, registry, connection): + Base = registry.generate_base() + + class BaseClass(Base): + __tablename__ = "baseclass" + id = Column( + Integer, + primary_key=True, + unique=True, + ) + + class A(BaseClass): + __tablename__ = "a" + + id = Column(ForeignKey(BaseClass.id), primary_key=True) + thing1 = Column(String(50)) + + __mapper_args__ = {"polymorphic_identity": "a"} + + class B(BaseClass): + __tablename__ = "b" + + id = Column(ForeignKey(BaseClass.id), primary_key=True) + thing2 = Column(String(50)) + + __mapper_args__ = {"polymorphic_identity": "b"} + + registry.metadata.create_all(connection) + with Session(connection) as sess: + + sess.add_all( + [ + A(thing1="thing1_1"), + A(thing1="thing1_2"), + B(thing2="thing2_2"), + 
B(thing2="thing2_3"), + A(thing1="thing1_3"), + A(thing1="thing1_4"), + B(thing2="thing2_1"), + B(thing2="thing2_4"), + ] + ) + + sess.commit() + + return BaseClass, A, B + + def test_wp(self, mapping_fixture, connection): + BaseClass, A, B = mapping_fixture + + stmt = union( + select(A.id, literal("a").label("type")), + select(B.id, literal("b").label("type")), + ).subquery() + + wp = with_polymorphic( + BaseClass, + [A, B], + selectable=stmt, + polymorphic_on=stmt.c.type, + ) + + session = Session(connection) + + with self.sql_execution_asserter() as asserter: + result = session.scalars( + select(wp) + .options(selectin_polymorphic(wp, [A, B])) + .order_by(wp.id) + ) + for obj in result: + if isinstance(obj, A): + obj.thing1 + else: + obj.thing2 + + asserter.assert_( + CompiledSQL( + "SELECT anon_1.id, anon_1.type FROM " + "(SELECT a.id AS id, :param_1 AS type FROM baseclass " + "JOIN a ON baseclass.id = a.id " + "UNION SELECT b.id AS id, :param_2 AS type " + "FROM baseclass JOIN b ON baseclass.id = b.id) AS anon_1 " + "ORDER BY anon_1.id", + [{"param_1": "a", "param_2": "b"}], + ), + AllOf( + CompiledSQL( + "SELECT a.id AS a_id, baseclass.id AS baseclass_id, " + "a.thing1 AS a_thing1 FROM baseclass " + "JOIN a ON baseclass.id = a.id " + "WHERE baseclass.id IN (__[POSTCOMPILE_primary_keys]) " + "ORDER BY baseclass.id", + {"primary_keys": [1, 2, 5, 6]}, + ), + CompiledSQL( + "SELECT b.id AS b_id, baseclass.id AS baseclass_id, " + "b.thing2 AS b_thing2 FROM baseclass " + "JOIN b ON baseclass.id = b.id " + "WHERE baseclass.id IN (__[POSTCOMPILE_primary_keys]) " + "ORDER BY baseclass.id", + {"primary_keys": [3, 4, 7, 8]}, + ), + ), + ) + + +class CompositeAttributesTest(fixtures.TestBase): + @testing.fixture + def mapping_fixture(self, registry, connection): + Base = registry.generate_base() + + class BaseCls(Base): + __tablename__ = "base" + id = Column( + Integer, primary_key=True, test_needs_autoincrement=True + ) + type = Column(String(50)) + + __mapper_args__ = 
{"polymorphic_on": type} + + class XYThing: + def __init__(self, x, y): + self.x = x + self.y = y + + def __composite_values__(self): + return (self.x, self.y) + + def __eq__(self, other): + return ( + isinstance(other, XYThing) + and other.x == self.x + and other.y == self.y + ) + + def __ne__(self, other): + return not self.__eq__(other) + + class A(ComparableEntity, BaseCls): + __tablename__ = "a" + id = Column(ForeignKey(BaseCls.id), primary_key=True) + thing1 = Column(String(50)) + comp1 = composite( + XYThing, Column("x1", Integer), Column("y1", Integer) + ) + + __mapper_args__ = { + "polymorphic_identity": "a", + "polymorphic_load": "selectin", + } + + class B(ComparableEntity, BaseCls): + __tablename__ = "b" + id = Column(ForeignKey(BaseCls.id), primary_key=True) + thing2 = Column(String(50)) + comp2 = composite( + XYThing, Column("x2", Integer), Column("y2", Integer) + ) + + __mapper_args__ = { + "polymorphic_identity": "b", + "polymorphic_load": "selectin", + } + + registry.metadata.create_all(connection) + + with Session(connection) as sess: + sess.add_all( + [ + A(id=1, thing1="thing1", comp1=XYThing(1, 2)), + B(id=2, thing2="thing2", comp2=XYThing(3, 4)), + ] + ) + sess.commit() + + return BaseCls, A, B, XYThing + + def test_load_composite(self, mapping_fixture, connection): + BaseCls, A, B, XYThing = mapping_fixture + + with Session(connection) as sess: + rows = sess.scalars(select(BaseCls).order_by(BaseCls.id)).all() + + eq_( + rows, + [ + A(id=1, thing1="thing1", comp1=XYThing(1, 2)), + B(id=2, thing2="thing2", comp2=XYThing(3, 4)), + ], + ) From 26b819e534bd51e32e31a1e3324d4a1e7b849d67 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 8 Mar 2022 13:40:12 -0500 Subject: [PATCH 147/632] pop the stack that we pushed Fixed regression caused by :ticket:`7760` where the new capabilities of :class:`.TextualSelect` were not fully implemented within the compiler properly, leading to issues with composed INSERT constructs such as "INSERT FROM SELECT" and 
"INSERT...ON CONFLICT" when combined with CTE and textual statements. Fixes: #7798 Change-Id: Ia2ce92507e574dd36fd26dd38ec9dd2713584467 (cherry picked from commit c36965ab211183764357456fff1640418586ed97) --- doc/build/changelog/unreleased_14/7798.rst | 9 +++++++++ lib/sqlalchemy/sql/compiler.py | 2 ++ test/dialect/postgresql/test_compiler.py | 22 ++++++++++++++++++++++ test/sql/test_cte.py | 21 +++++++++++++++++++++ 4 files changed, 54 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7798.rst diff --git a/doc/build/changelog/unreleased_14/7798.rst b/doc/build/changelog/unreleased_14/7798.rst new file mode 100644 index 00000000000..31a5bb2e421 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7798.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, sql, regression + :tickets: 7798 + + Fixed regression caused by :ticket:`7760` where the new capabilities of + :class:`.TextualSelect` were not fully implemented within the compiler + properly, leading to issues with composed INSERT constructs such as "INSERT + FROM SELECT" and "INSERT...ON CONFLICT" when combined with CTE and textual + statements. 
diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 6be8ae281fe..7780d3782a4 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -1664,6 +1664,8 @@ def visit_textual_select( nesting_level = len(self.stack) if not toplevel else None text = self._render_cte_clause(nesting_level=nesting_level) + text + self.stack.pop(-1) + return text def visit_null(self, expr, **kw): diff --git a/test/dialect/postgresql/test_compiler.py b/test/dialect/postgresql/test_compiler.py index 383a77c1d61..49ab15261e6 100644 --- a/test/dialect/postgresql/test_compiler.py +++ b/test/dialect/postgresql/test_compiler.py @@ -2338,6 +2338,28 @@ def test_on_conflict_do_no_call_twice(self): ): meth() + def test_on_conflict_cte_plus_textual(self): + """test #7798""" + + bar = table("bar", column("id"), column("attr"), column("foo_id")) + s1 = text("SELECT bar.id, bar.attr FROM bar").columns( + bar.c.id, bar.c.attr + ) + s2 = ( + insert(bar) + .from_select(list(s1.selected_columns), s1) + .on_conflict_do_update( + index_elements=[s1.selected_columns.id], + set_={"attr": s1.selected_columns.attr}, + ) + ) + + self.assert_compile( + s2, + "INSERT INTO bar (id, attr) SELECT bar.id, bar.attr " + "FROM bar ON CONFLICT (id) DO UPDATE SET attr = bar.attr", + ) + def test_do_nothing_no_target(self): i = ( diff --git a/test/sql/test_cte.py b/test/sql/test_cte.py index 1afa5c8558a..d146ae60664 100644 --- a/test/sql/test_cte.py +++ b/test/sql/test_cte.py @@ -1658,6 +1658,27 @@ def test_textual_select_uses_independent_cte_two(self): "(SELECT id FROM baz)", ) + def test_textual_select_stack_correction(self): + """test #7798 , regression from #7760""" + + foo = table("foo", column("id")) + bar = table("bar", column("id"), column("attr"), column("foo_id")) + + s1 = text("SELECT id FROM foo").columns(foo.c.id) + s2 = text( + "SELECT bar.id, bar.attr FROM bar WHERE br.id IN " + "(SELECT id FROM baz)" + ).columns(bar.c.id, bar.c.attr) + s3 = 
bar.insert().from_select(list(s2.selected_columns), s2) + s4 = s3.add_cte(s1.cte(name="baz")) + + self.assert_compile( + s4, + "WITH baz AS (SELECT id FROM foo) INSERT INTO bar (id, attr) " + "SELECT bar.id, bar.attr FROM bar WHERE br.id IN " + "(SELECT id FROM baz)", + ) + def test_insert_uses_independent_cte(self): products = table("products", column("id"), column("price")) From 78c0d4f3c6961accbe9911588e73b33a0179a098 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 8 Mar 2022 09:34:09 -0500 Subject: [PATCH 148/632] support adapt_on_names for with_polymorphic Added :paramref:`_orm.with_polymorphic.adapt_on_names` to the :func:`_orm.with_polymorphic` function, which allows a polymorphic load (typically with concrete mapping) to be stated against an alternative selectable that will adapt to the original mapped selectable on column names alone. Fixes: #7805 Change-Id: I933e180a489fec8a6f4916d1622d444dd4434f30 (cherry picked from commit 33e198185b2c8cc5a61c990d09a76bec76fafe31) --- doc/build/changelog/unreleased_14/7805.rst | 9 + lib/sqlalchemy/orm/util.py | 15 +- test/orm/inheritance/test_concrete.py | 244 ++++++++++++++++++++- 3 files changed, 266 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7805.rst diff --git a/doc/build/changelog/unreleased_14/7805.rst b/doc/build/changelog/unreleased_14/7805.rst new file mode 100644 index 00000000000..2d2940239a6 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7805.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: usecase, orm + :tickets: 7805 + + Added :paramref:`_orm.with_polymorphic.adapt_on_names` to the + :func:`_orm.with_polymorphic` function, which allows a polymorphic load + (typically with concrete mapping) to be stated against an alternative + selectable that will adapt to the original mapped selectable on column + names alone. 
diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index d90d44ce900..0cd6b8f41c6 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -709,7 +709,9 @@ def __init__( # are not even the thing we are mapping, such as embedded # selectables in subqueries or CTEs. See issue #6060 adapt_from_selectables=[ - m.selectable for m in self.with_polymorphic_mappers + m.selectable + for m in self.with_polymorphic_mappers + if not adapt_on_names ], ) @@ -1338,6 +1340,7 @@ def with_polymorphic( flat=False, polymorphic_on=None, aliased=False, + adapt_on_names=False, innerjoin=False, _use_mapper_path=False, _existing_alias=None, @@ -1402,6 +1405,15 @@ def with_polymorphic( :param innerjoin: if True, an INNER JOIN will be used. This should only be specified if querying for one specific subtype only + + :param adapt_on_names: Passes through the + :paramref:`_orm.aliased.adapt_on_names` + parameter to the aliased object. This may be useful in situations where + the given selectable is not directly related to the existing mapped + selectable. + + .. 
versionadded:: 1.4.33 + """ primary_mapper = _class_to_mapper(base) @@ -1429,6 +1441,7 @@ def with_polymorphic( return AliasedClass( base, selectable, + adapt_on_names=adapt_on_names, with_polymorphic_mappers=mappers, with_polymorphic_discriminator=polymorphic_on, use_mapper_path=_use_mapper_path, diff --git a/test/orm/inheritance/test_concrete.py b/test/orm/inheritance/test_concrete.py index d9dfa3d9e6c..56beffedd57 100644 --- a/test/orm/inheritance/test_concrete.py +++ b/test/orm/inheritance/test_concrete.py @@ -5,24 +5,34 @@ from sqlalchemy import select from sqlalchemy import String from sqlalchemy import testing +from sqlalchemy import union from sqlalchemy import union_all +from sqlalchemy.ext.declarative import AbstractConcreteBase from sqlalchemy.ext.hybrid import hybrid_property +from sqlalchemy.orm import aliased from sqlalchemy.orm import attributes from sqlalchemy.orm import class_mapper from sqlalchemy.orm import clear_mappers +from sqlalchemy.orm import composite from sqlalchemy.orm import configure_mappers +from sqlalchemy.orm import contains_eager +from sqlalchemy.orm import declared_attr from sqlalchemy.orm import joinedload from sqlalchemy.orm import polymorphic_union from sqlalchemy.orm import relationship -from sqlalchemy.orm.util import with_polymorphic +from sqlalchemy.orm import Session +from sqlalchemy.orm import with_polymorphic from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from sqlalchemy.testing import mock +from sqlalchemy.testing.assertsql import CompiledSQL +from sqlalchemy.testing.entities import ComparableEntity from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.schema import Column from sqlalchemy.testing.schema import Table +from test.orm.test_events import _RemoveListeners class ConcreteTest(fixtures.MappedTest): @@ -1434,3 +1444,235 @@ class Office(Location): 
eq_(sess.get(Refugee, 2).name, "refugee2") eq_(sess.get(Office, 1).name, "office1") eq_(sess.get(Office, 2).name, "office2") + + +class AdaptOnNamesTest(_RemoveListeners, fixtures.DeclarativeMappedTest): + """test the full integration case for #7805""" + + @classmethod + def setup_classes(cls): + Base = cls.DeclarativeBasic + Basic = cls.Basic + + class Metadata(ComparableEntity, Base): + __tablename__ = "metadata" + id = Column( + Integer, + primary_key=True, + ) + + some_data = Column(String(50)) + + class BaseObj(ComparableEntity, AbstractConcreteBase, Base): + """abstract concrete base with a custom polymorphic_union. + + Additionally, at query time it needs to use a new version of this + union each time in order to add filter criteria. this is because + polymorphic_union() is of course very inefficient in its form + and if someone actually has to use this, it's likely better for + filter criteria to be within each sub-select. The current use + case here does not really have easy answers as we don't have + a built-in widget that does this. The complexity / little use + ratio doesn't justify it unfortunately. + + This use case might be easier if we were mapped to something that + can be adapted. however, we are using adapt_on_names here as this + is usually what's more accessible to someone trying to get into + this, or at least we should make that feature work as well as it + can. 
+ + """ + + @declared_attr + def id(cls): + return Column(Integer, primary_key=True) + + @declared_attr + def metadata_id(cls): + return Column(ForeignKey(Metadata.id), nullable=False) + + @classmethod + def _create_polymorphic_union(cls, mappers, discriminator_name): + return cls.make_statement().subquery() + + @declared_attr + def related_metadata(cls): + return relationship(Metadata) + + @classmethod + def make_statement(cls, *filter_cond, **kw): + include_metadata = kw.pop("include_metadata", False) + a_stmt = ( + select( + A.id, + A.metadata_id, + A.thing1, + A.x1, + A.y1, + null().label("thing2"), + null().label("x2"), + null().label("y2"), + literal("a").label("type"), + ) + .join(Metadata) + .filter(*filter_cond) + ) + if include_metadata: + a_stmt = a_stmt.add_columns(Metadata.__table__) + + b_stmt = ( + select( + B.id, + B.metadata_id, + null().label("thing1"), + null().label("x1"), + null().label("y1"), + B.thing2, + B.x2, + B.y2, + literal("b").label("type"), + ) + .join(Metadata) + .filter(*filter_cond) + ) + if include_metadata: + b_stmt = b_stmt.add_columns(Metadata.__table__) + + return union(a_stmt, b_stmt) + + class XYThing(Basic): + def __init__(self, x, y): + self.x = x + self.y = y + + def __composite_values__(self): + return (self.x, self.y) + + def __eq__(self, other): + return ( + isinstance(other, XYThing) + and other.x == self.x + and other.y == self.y + ) + + def __ne__(self, other): + return not self.__eq__(other) + + class A(BaseObj): + __tablename__ = "a" + thing1 = Column(String(50)) + comp1 = composite( + XYThing, Column("x1", Integer), Column("y1", Integer) + ) + + __mapper_args__ = {"polymorphic_identity": "a", "concrete": True} + + class B(BaseObj): + __tablename__ = "b" + thing2 = Column(String(50)) + comp2 = composite( + XYThing, Column("x2", Integer), Column("y2", Integer) + ) + + __mapper_args__ = {"polymorphic_identity": "b", "concrete": True} + + @classmethod + def insert_data(cls, connection): + Metadata, A, B = 
cls.classes("Metadata", "A", "B") + XYThing = cls.classes.XYThing + + with Session(connection) as sess: + sess.add_all( + [ + Metadata(id=1, some_data="m1"), + Metadata(id=2, some_data="m2"), + ] + ) + sess.flush() + + sess.add_all( + [ + A( + id=5, + metadata_id=1, + thing1="thing1", + comp1=XYThing(1, 2), + ), + B( + id=6, + metadata_id=2, + thing2="thing2", + comp2=XYThing(3, 4), + ), + ] + ) + sess.commit() + + def test_contains_eager(self): + Metadata, A, B = self.classes("Metadata", "A", "B") + BaseObj = self.classes.BaseObj + XYThing = self.classes.XYThing + + alias = BaseObj.make_statement( + Metadata.id < 3, include_metadata=True + ).subquery() + ac = with_polymorphic( + BaseObj, + [A, B], + selectable=alias, + adapt_on_names=True, + ) + + mt = aliased(Metadata, alias=alias) + + sess = fixture_session() + + with self.sql_execution_asserter() as asserter: + objects = sess.scalars( + select(ac) + .options( + contains_eager(ac.A.related_metadata.of_type(mt)), + contains_eager(ac.B.related_metadata.of_type(mt)), + ) + .order_by(ac.id) + ).all() + + eq_( + objects, + [ + A( + id=5, + metadata_id=1, + thing1="thing1", + comp1=XYThing(1, 2), + related_metadata=Metadata(id=1, some_data="m1"), + ), + B( + id=6, + metadata_id=2, + thing2="thing2", + comp2=XYThing(3, 4), + related_metadata=Metadata(id=2, some_data="m2"), + ), + ], + ) + asserter.assert_( + CompiledSQL( + "SELECT anon_1.id, anon_1.metadata_id, anon_1.thing1, " + "anon_1.x1, anon_1.y1, anon_1.thing2, anon_1.x2, anon_1.y2, " + "anon_1.type, anon_1.id_1, anon_1.some_data FROM " + "(SELECT a.id AS id, a.metadata_id AS metadata_id, " + "a.thing1 AS thing1, a.x1 AS x1, a.y1 AS y1, " + "NULL AS thing2, NULL AS x2, NULL AS y2, :param_1 AS type, " + "metadata.id AS id_1, metadata.some_data AS some_data " + "FROM a JOIN metadata ON metadata.id = a.metadata_id " + "WHERE metadata.id < :id_2 UNION SELECT b.id AS id, " + "b.metadata_id AS metadata_id, NULL AS thing1, NULL AS x1, " + "NULL AS y1, b.thing2 AS 
thing2, b.x2 AS x2, b.y2 AS y2, " + ":param_2 AS type, metadata.id AS id_1, " + "metadata.some_data AS some_data FROM b " + "JOIN metadata ON metadata.id = b.metadata_id " + "WHERE metadata.id < :id_3) AS anon_1 ORDER BY anon_1.id", + [{"param_1": "a", "id_2": 3, "param_2": "b", "id_3": 3}], + ) + ) From bb665d52ef0f756783754618d87a9f47cde6af81 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 8 Mar 2022 18:38:11 -0500 Subject: [PATCH 149/632] add note about slots=True for attrs Fixes: #7802 Change-Id: Ic5fadd369a0b63309cd9c44798ee5395efdbab2b (cherry picked from commit e790b11460ffaf8cc68b309ad47fe0e314b2c67f) --- doc/build/orm/declarative_styles.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/doc/build/orm/declarative_styles.rst b/doc/build/orm/declarative_styles.rst index 2b4178a856f..284e5a1e61b 100644 --- a/doc/build/orm/declarative_styles.rst +++ b/doc/build/orm/declarative_styles.rst @@ -480,5 +480,12 @@ A mapping using ``@attr.s``, in conjunction with imperative table:: with the :meth:`_orm.registry.map_imperatively` function. See the section :ref:`orm_imperative_dataclasses` for a similar example. +.. note:: The ``attrs`` ``slots=True`` option, which enables ``__slots__`` on + a mapped class, cannot be used with SQLAlchemy mappings without fully + implementing alternative + :ref:`attribute instrumentation `, as mapped + classes normally rely upon direct access to ``__dict__`` for state storage. + Behavior is undefined when this option is present. + .. _dataclasses: https://docs.python.org/3/library/dataclasses.html .. _attrs: https://pypi.org/project/attrs/ From cc78f862defccc3f52cc040b4edb9094ba28f6b8 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 11 Mar 2022 10:08:36 -0500 Subject: [PATCH 150/632] ORM quickstart This is done in 1.4 style so it can be backported to 1.4. Will put this up as is, we can work on it. For 2.0, the ORM mapping will be updated to mapped_column() style when we do the full pass. 
Change-Id: Icfdf81449973844dac244b3a107ce955a7d3b16c (cherry picked from commit 86fbd4a2155c31cd79f7446456b03f4cd5249050) --- doc/build/index.rst | 2 + doc/build/orm/index.rst | 1 + doc/build/orm/quickstart.rst | 415 ++++++++++++++++++++++ doc/build/orm/session_basics.rst | 2 + doc/build/tutorial/data_select.rst | 14 + doc/build/tutorial/dbapi_transactions.rst | 5 + test/base/test_tutorials.py | 3 + 7 files changed, 442 insertions(+) create mode 100644 doc/build/orm/quickstart.rst diff --git a/doc/build/index.rst b/doc/build/index.rst index d39f1217a75..555ffba8a52 100644 --- a/doc/build/index.rst +++ b/doc/build/index.rst @@ -38,6 +38,8 @@ SQLAlchemy Documentation Core and ORM working styles more closely than ever. The new tutorial introduces both concepts in parallel. + * **For a quick glance:** :doc:`/orm/quickstart` - a glimpse at what working with the ORM looks like + * **For all users:** :doc:`/tutorial/index` - The new SQLAlchemy 1.4/2.0 Tutorial .. container:: diff --git a/doc/build/orm/index.rst b/doc/build/orm/index.rst index 8434df62c7d..e37f472965a 100644 --- a/doc/build/orm/index.rst +++ b/doc/build/orm/index.rst @@ -12,6 +12,7 @@ tutorial. :maxdepth: 2 tutorial + quickstart mapper_config relationships loading_objects diff --git a/doc/build/orm/quickstart.rst b/doc/build/orm/quickstart.rst new file mode 100644 index 00000000000..d670fadb966 --- /dev/null +++ b/doc/build/orm/quickstart.rst @@ -0,0 +1,415 @@ +.. _orm_quickstart: + + +ORM Quick Start +=============== + +For new users who want to quickly see what basic ORM use looks like, here's an +abbreviated form of the mappings and examples used in the +:ref:`unified_tutorial`. The code here is fully runnable from a clean command +line. + +As the descriptions in this section are intentionally **very short**, please +proceed to the full :ref:`unified_tutorial` for a much more in-depth +description of each of the concepts being illustrated here. 
+ + +Step One - Declare Models +========================== + +Here, we define module-level constructs that will form the structures +which we will be querying from the database. This structure, known as a +:ref:`Declarative Mapping `, defines at once both a +Python object model, as well as +:term:`database metadata` that describes +real SQL tables that exist, or will exist, in a particular database:: + + >>> from sqlalchemy import Column + >>> from sqlalchemy import ForeignKey + >>> from sqlalchemy import Integer + >>> from sqlalchemy import String + >>> from sqlalchemy.orm import declarative_base + >>> from sqlalchemy.orm import relationship + + >>> Base = declarative_base() + + >>> class User(Base): + ... __tablename__ = "user_account" + ... + ... id = Column(Integer, primary_key=True) + ... name = Column(String(30)) + ... fullname = Column(String) + ... + ... addresses = relationship( + ... "Address", back_populates="user", cascade="all, delete-orphan" + ... ) + ... + ... def __repr__(self): + ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" + + >>> class Address(Base): + ... __tablename__ = "address" + ... + ... id = Column(Integer, primary_key=True) + ... email_address = Column(String, nullable=False) + ... user_id = Column(Integer, ForeignKey("user_account.id"), nullable=False) + ... + ... user = relationship("User", back_populates="addresses") + ... + ... def __repr__(self): + ... return f"Address(id={self.id!r}, email_address={self.email_address!r})" + +Above, the declarative mapping makes use of :class:`_schema.Column` objects +to define the basic units of data storage that will be in the database. +The :func:`_orm.relationship` construct defines linkages between two +:term:`mapped` classes, ``User`` and ``Address`` above. 
+ +The schema contains necessary elements such as primary key constraints set up +by the :paramref:`_schema.Column.primary_key` parameter, a +:term:`foreign key constraint` configured using :class:`_schema.ForeignKey` +(which is used by :func:`_orm.relationship` as well), and datatypes for columns +including :class:`_types.Integer` and :class:`_types.String`. + +More on table metadata and an intro to ORM declared mapping is in the +Tutorial at :ref:`tutorial_working_with_metadata`. + +Step Two - Create an Engine +============================ + +The :class:`_engine.Engine` is a **factory** that can create new +database connections for us, which also holds onto connections inside +of a :ref:`Connection Pool ` for fast reuse. For learning +purposes, we normally use a :ref:`SQLite ` memory-only database +for convenience:: + + >>> from sqlalchemy import create_engine + >>> engine = create_engine("sqlite://", echo=True, future=True) + +.. tip:: + + The ``echo=True`` parameter indicates that SQL emitted by connections will + be logged to standard out. ``future=True`` is to ensure we are using + the latest SQLAlchemy :term:`2.0-style` APIs. + +A full intro to the :class:`_engine.Engine` starts at :ref:`tutorial_engine`. + +Step Three - Emit CREATE TABLE DDL +================================== + +Using our table metadata and our engine, we can generate our schema at once +in our target SQLite database, using a method called :meth:`_schema.MetaData.create_all`: + +.. sourcecode:: pycon+sql + + >>> Base.metadata.create_all(engine) + {opensql}BEGIN (implicit) + PRAGMA main.table_...info("user_account") + ... + PRAGMA main.table_...info("address") + ... + CREATE TABLE user_account ( + id INTEGER NOT NULL, + name VARCHAR(30), + fullname VARCHAR, + PRIMARY KEY (id) + ) + ... + CREATE TABLE address ( + id INTEGER NOT NULL, + email_address VARCHAR NOT NULL, + user_id INTEGER NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY(user_id) REFERENCES user_account (id) + ) + ... 
+ COMMIT + +A lot just happened from that bit of Python code we wrote. For a complete +overview of what's going on on with Table metadata, proceed in the +Tutorial at :ref:`tutorial_working_with_metadata`. + +Step Four - Create Objects and Persist +====================================== + +We are now ready to insert data in the database. We accomplish this by +creating instances of ``User`` and ``Address`` objects, which have +an ``__init__()`` method already as established automatically by the +declarative mapping process. We then pass them +to the database using an object called a :ref:`Session `, +which makes use of the :class:`_engine.Engine` to interact with the +database. The :meth:`_orm.Session.add_all` method is used here to add +multiple objects at once, and the :meth:`_orm.Session.commit` method +will be used to :ref:`flush ` any pending changes to the +database and then :ref:`commit ` the current database +transaction, which is always in progress whenever the :class:`_orm.Session` +is used: + +.. sourcecode:: pycon+sql + + >>> from sqlalchemy.orm import Session + + >>> with Session(engine) as session: + ... + ... spongebob = User( + ... name="spongebob", + ... fullname="Spongebob Squarepants", + ... addresses=[Address(email_address="spongebob@sqlalchemy.org")], + ... ) + ... sandy = User( + ... name="sandy", + ... fullname="Sandy Cheeks", + ... addresses=[ + ... Address(email_address="sandy@sqlalchemy.org"), + ... Address(email_address="sandy@squirrelpower.org"), + ... ], + ... ) + ... patrick = User(name="patrick", fullname="Patrick Star") + ... + ... session.add_all([spongebob, sandy, patrick]) + ... + ... session.commit() + {opensql}BEGIN (implicit) + INSERT INTO user_account (name, fullname) VALUES (?, ?) + [...] ('spongebob', 'Spongebob Squarepants') + INSERT INTO user_account (name, fullname) VALUES (?, ?) + [...] ('sandy', 'Sandy Cheeks') + INSERT INTO user_account (name, fullname) VALUES (?, ?) + [...] 
('patrick', 'Patrick Star') + INSERT INTO address (email_address, user_id) VALUES (?, ?) + [...] ('spongebob@sqlalchemy.org', 1) + INSERT INTO address (email_address, user_id) VALUES (?, ?) + [...] ('sandy@sqlalchemy.org', 2) + INSERT INTO address (email_address, user_id) VALUES (?, ?) + [...] ('sandy@squirrelpower.org', 2) + COMMIT + + +.. tip:: + + It's recommended that the :class:`_orm.Session` be used in context + manager style as above, that is, using the Python ``with:`` statement. + The :class:`_orm.Session` object represents active database resources + so it's good to make sure it's closed out when a series of operations + are completed. In the next section, we'll keep a :class:`_orm.Session` + opened just for illustration purposes. + +Basics on creating a :class:`_orm.Session` are at +:ref:`tutorial_executing_orm_session` and more at :ref:`session_basics`. + +Then, some varieties of basic persistence operations are introduced +at :ref:`tutorial_inserting_orm`. + +Step Five - Simple SELECT +========================== + +With some rows in the database, here's the simplest form of emitting a SELECT +statement to load some objects. To create SELECT statements, we use the +:func:`_sql.select` function to create a new :class:`_sql.Select` object, which +we then invoke using a :class:`_orm.Session`. The method that is often useful +when querying for ORM objects is the :meth:`_orm.Session.scalars` method, which +will return a :class:`_result.ScalarResult` object that will iterate through +the ORM objects we've selected: + +.. sourcecode:: pycon+sql + + >>> from sqlalchemy import select + + >>> session = Session(engine) + + >>> stmt = select(User).where(User.name.in_(["spongebob", "sandy"])) + + >>> for user in session.scalars(stmt): + ... print(user) + {opensql}BEGIN (implicit) + SELECT user_account.id, user_account.name, user_account.fullname + FROM user_account + WHERE user_account.name IN (?, ?) + [...] 
('spongebob', 'sandy'){stop} + User(id=1, name='spongebob', fullname='Spongebob Squarepants') + User(id=2, name='sandy', fullname='Sandy Cheeks') + + +The above query also made use of the :meth:`_sql.Select.where` method +to add WHERE criteria, and also used the :meth:`_sql.ColumnOperators.in_` +method that's part of all SQLAlchemy column-like constructs to use the +SQL IN operator. + +More detail on how to select objects and individual columns is at +:ref:`tutorial_selecting_orm_entities`. + +Step Six - SELECT with JOIN +=========================== + +It's very common to query amongst multiple tables at once, and in SQL +the JOIN keyword is the primary way this happens. The :class:`_sql.Select` +construct creates joins using the :meth:`_sql.Select.join` method: + +.. sourcecode:: pycon+sql + + >>> stmt = ( + ... select(Address) + ... .join(Address.user) + ... .where(User.name == "sandy") + ... .where(Address.email_address == "sandy@sqlalchemy.org") + ... ) + >>> sandy_address = session.scalars(stmt).one() + {opensql}SELECT address.id, address.email_address, address.user_id + FROM address JOIN user_account ON user_account.id = address.user_id + WHERE user_account.name = ? AND address.email_address = ? + [...] ('sandy', 'sandy@sqlalchemy.org') + {stop} + >>> sandy_address + Address(id=2, email_address='sandy@sqlalchemy.org') + +The above query illustrates multiple WHERE criteria which are automatically +chained together using AND, as well as how to use SQLAlchemy column-like +objects to create "equality" comparisons, which uses the overridden Python +method :meth:`_sql.ColumnOperators.__eq__` to produce a SQL criteria object. + +Some more background on the concepts above are at +:ref:`tutorial_select_where_clause` and :ref:`tutorial_select_join`. 
+ +Step Seven - Make Changes +========================= + +The :class:`_orm.Session` object, in conjunction with our ORM-mapped classes +``User`` and ``Address``, automatically track changes to the objects as they +are made, which result in SQL statements that will be emitted the next +time the :class:`_orm.Session` flushes. Below, we change one email +address associated with "sandy", and also add a new email address to +"patrick", after emitting a SELECT to retrieve the row for "patrick": + +.. sourcecode:: pycon+sql + + >>> stmt = select(User).where(User.name == "patrick") + >>> patrick = session.scalars(stmt).one() + {opensql}SELECT user_account.id, user_account.name, user_account.fullname + FROM user_account + WHERE user_account.name = ? + [...] ('patrick',) + {stop} + + >>> patrick.addresses.append( + ... Address(email_address="patrickstar@sqlalchemy.org") + ... ) + {opensql}SELECT address.id AS address_id, address.email_address AS address_email_address, address.user_id AS address_user_id + FROM address + WHERE ? = address.user_id + [...] (3,){stop} + + >>> sandy_address.email_address = "sandy_cheeks@sqlalchemy.org" + + >>> session.commit() + {opensql}UPDATE address SET email_address=? WHERE address.id = ? + [...] ('sandy_cheeks@sqlalchemy.org', 2) + INSERT INTO address (email_address, user_id) VALUES (?, ?) + [...] ('patrickstar@sqlalchemy.org', 3) + COMMIT + {stop} + +Notice when we accessed ``patrick.addresses``, a SELECT was emitted. This is +called a :term:`lazy load`. Background on different ways to access related +items using more or less SQL is introduced at :ref:`tutorial_orm_loader_strategies`. + +A detailed walkthrough on ORM data manipulation starts at +:ref:`tutorial_orm_data_manipulation`. 
+ +Step Eight - Some Deletes +========================= + +All things must come to an end, as is the case for some of our database +rows - here's a quick demonstration of two different forms of deletion, both +of which are important based on the specific use case. + +First we will remove one of the ``Address`` objects from the "sandy" user. +When the :class:`_orm.Session` next flushes, this will result in the +row being deleted. This behavior is something that we configured in our +mapping called the :ref:`delete cascade `. We can get a handle to the ``sandy`` +object by primary key using :meth:`_orm.Session.get`, then work with the object: + +.. sourcecode:: pycon+sql + + >>> sandy = session.get(User, 2) + {opensql}BEGIN (implicit) + SELECT user_account.id AS user_account_id, user_account.name AS user_account_name, user_account.fullname AS user_account_fullname + FROM user_account + WHERE user_account.id = ? + [...] (2,){stop} + + >>> sandy.addresses.remove(sandy_address) + {opensql}SELECT address.id AS address_id, address.email_address AS address_email_address, address.user_id AS address_user_id + FROM address + WHERE ? = address.user_id + [...] (2,) + +The last SELECT above was the :term:`lazy load` operation proceeding so that +the ``sandy.addresses`` collection could be loaded, so that we could remove the +``sandy_address`` member. There are other ways to go about this series +of operations that won't emit as much SQL. + +We can choose to emit the DELETE SQL for what's set to be changed so far, without +committing the transaction, using the +:meth:`_orm.Session.flush` method: + +.. sourcecode:: pycon+sql + + >>> session.flush() + {opensql}DELETE FROM address WHERE address.id = ? + [...] (2,) + +Next, we will delete the "patrick" user entirely. For a top-level delete of +an object by itself, we use the :meth:`_orm.Session.delete` method; this +method doesn't actually perform the deletion, but sets up the object +to be deleted on the next flush. 
The +operation will also :term:`cascade` to related objects based on the cascade +options that we configured, in this case, onto the related ``Address`` objects: + +.. sourcecode:: pycon+sql + + >>> session.delete(patrick) + {opensql}SELECT user_account.id AS user_account_id, user_account.name AS user_account_name, user_account.fullname AS user_account_fullname + FROM user_account + WHERE user_account.id = ? + [...] (3,) + SELECT address.id AS address_id, address.email_address AS address_email_address, address.user_id AS address_user_id + FROM address + WHERE ? = address.user_id + [...] (3,) + +The :meth:`_orm.Session.delete` method in this particular case emitted two +SELECT statements, even though it didn't emit a DELETE, which might seem surprising. +This is because when the method went to inspect the object, it turns out the +``patrick`` object was :term:`expired`, which happened when we last called upon +:meth:`_orm.Session.commit`, and the SQL emitted was to re-load the rows +from the new transaction. This expiration is optional, and in normal +use we will often be turning it off for situations where it doesn't apply well. + +To illustrate the rows being deleted, here's the commit: + +.. sourcecode:: pycon+sql + + >>> session.commit() + {opensql}DELETE FROM address WHERE address.id = ? + [...] (4,) + DELETE FROM user_account WHERE user_account.id = ? + [...] (3,) + COMMIT + {stop} + +The Tutorial discusses ORM deletion at :ref:`tutorial_orm_deleting`. +Background on object expiration is at :ref:`session_expiring`; cascades +are discussed in depth at :ref:`unitofwork_cascades`. + +Step Nine - Learn the above concepts in depth +============================================= + +For a new user, the above sections were likely a whirlwind tour. There's a +lot of important concepts in each step above that weren't covered. 
With a +quick overview of what things look like, it's recommended to work through +the :ref:`unified_tutorial` to gain a solid working knowledge of what's +really going on above. Good luck! + + + + + diff --git a/doc/build/orm/session_basics.rst b/doc/build/orm/session_basics.rst index 529f786c1af..d60db4d73c7 100644 --- a/doc/build/orm/session_basics.rst +++ b/doc/build/orm/session_basics.rst @@ -40,6 +40,7 @@ caveats. It's intended that usually, you'd re-associate detached objects with another :class:`.Session` when you want to work with them again, so that they can resume their normal task of representing database state. +.. _session_basics: Basics of Using a Session ========================= @@ -457,6 +458,7 @@ required after a flush fails, even though the underlying transaction will have been rolled back already - this is so that the overall nesting pattern of so-called "subtransactions" is consistently maintained. +.. _session_expiring: Expiring / Refreshing --------------------- diff --git a/doc/build/tutorial/data_select.rst b/doc/build/tutorial/data_select.rst index 9f7aafc1b2e..c8fac288e62 100644 --- a/doc/build/tutorial/data_select.rst +++ b/doc/build/tutorial/data_select.rst @@ -156,6 +156,20 @@ The above :class:`_engine.Row` has just one element, representing the ``User`` e >>> row[0] User(id=1, name='spongebob', fullname='Spongebob Squarepants') +A highly recommended convenience method of achieving the same result as above +is to use the :meth:`_orm.Session.scalars` method to execute the statement +directly; this method will return a :class:`_result.ScalarResult` object +that delivers the first "column" of each row at once, in this case, +instances of the ``User`` class:: + + >>> user = session.scalars(select(User)).first() + {opensql}SELECT user_account.id, user_account.name, user_account.fullname + FROM user_account + [...] 
(){stop} + >>> user + User(id=1, name='spongebob', fullname='Spongebob Squarepants') + + Alternatively, we can select individual columns of an ORM entity as distinct elements within result rows, by using the class-bound attributes; when these are passed to a construct such as :func:`_sql.select`, they are resolved into diff --git a/doc/build/tutorial/dbapi_transactions.rst b/doc/build/tutorial/dbapi_transactions.rst index 16768da2b9f..0249702ef6b 100644 --- a/doc/build/tutorial/dbapi_transactions.rst +++ b/doc/build/tutorial/dbapi_transactions.rst @@ -525,6 +525,11 @@ than that, however understanding that it has a :meth:`_orm.Session.execute` method that's used the same way as :meth:`_future.Connection.execute` will get us started with the examples that follow later. +.. seealso:: + + :ref:`session_basics` - presents basic creational and usage patterns with + the :class:`_orm.Session` object. + diff --git a/test/base/test_tutorials.py b/test/base/test_tutorials.py index 494b8a0f67d..74011d3d494 100644 --- a/test/base/test_tutorials.py +++ b/test/base/test_tutorials.py @@ -115,6 +115,9 @@ def test_core_operators(self): def test_orm_queryguide(self): self._run_doctest("orm/queryguide.rst") + def test_orm_quickstart(self): + self._run_doctest("orm/quickstart.rst") + # unicode checker courtesy pytest From f7cea6e9638a5e452a03ed43fd5752bd21c281af Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 11 Mar 2022 13:40:16 -0500 Subject: [PATCH 151/632] add copybutton works great, including for plain code plus prompt code with SQL (copies only the prompt code when prompts are present). 
added some styling to zzzeeksphinx Change-Id: I1b94b0488689e875adfb90ec171e04f7e8022415 (cherry picked from commit 7a48471f6482576ac50abab35ce4fa64c1f633b4) --- doc/build/conf.py | 7 +++++++ doc/build/requirements.txt | 1 + 2 files changed, 8 insertions(+) diff --git a/doc/build/conf.py b/doc/build/conf.py index f29349c7955..7f55f5208fc 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -36,6 +36,7 @@ "zzzeeksphinx", "changelog", "sphinx_paramlinks", + "sphinx_copybutton", ] needs_extensions = {"zzzeeksphinx": "1.2.1"} @@ -44,6 +45,12 @@ # have reported this. templates_path = [os.path.abspath("templates")] +# https://sphinx-copybutton.readthedocs.io/en/latest/use.html#strip-and-configure-input-prompts-for-code-cells +copybutton_prompt_text = ( + r">>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: " +) +copybutton_prompt_is_regexp = True + nitpicky = False # The suffix of source filenames. diff --git a/doc/build/requirements.txt b/doc/build/requirements.txt index f3e40e01fd9..c5871d21241 100644 --- a/doc/build/requirements.txt +++ b/doc/build/requirements.txt @@ -1,3 +1,4 @@ git+https://github.com/sqlalchemyorg/changelog.git#egg=changelog git+https://github.com/sqlalchemyorg/sphinx-paramlinks.git#egg=sphinx-paramlinks git+https://github.com/sqlalchemyorg/zzzeeksphinx.git#egg=zzzeeksphinx +sphinx-copybutton \ No newline at end of file From 555ab4b95ab5e1a24c7c7e8dbff219d36930a1b5 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 11 Mar 2022 14:07:09 -0500 Subject: [PATCH 152/632] fix section format headings, remove "Step: " Change-Id: I25a837cf866b152a30aa373d07f704e0cc11d497 (cherry picked from commit 575a5e44bae4e587ff19d4137e54a7888ee08e41) --- doc/build/orm/quickstart.rst | 38 +++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/doc/build/orm/quickstart.rst b/doc/build/orm/quickstart.rst index d670fadb966..95cace9183a 100644 --- a/doc/build/orm/quickstart.rst +++ b/doc/build/orm/quickstart.rst @@ -14,8 
+14,8 @@ proceed to the full :ref:`unified_tutorial` for a much more in-depth description of each of the concepts being illustrated here. -Step One - Declare Models -========================== +Declare Models +--------------- Here, we define module-level constructs that will form the structures which we will be querying from the database. This structure, known as a @@ -73,8 +73,9 @@ including :class:`_types.Integer` and :class:`_types.String`. More on table metadata and an intro to ORM declared mapping is in the Tutorial at :ref:`tutorial_working_with_metadata`. -Step Two - Create an Engine -============================ +Create an Engine +------------------ + The :class:`_engine.Engine` is a **factory** that can create new database connections for us, which also holds onto connections inside @@ -93,8 +94,9 @@ for convenience:: A full intro to the :class:`_engine.Engine` starts at :ref:`tutorial_engine`. -Step Three - Emit CREATE TABLE DDL -================================== +Emit CREATE TABLE DDL +---------------------- + Using our table metadata and our engine, we can generate our schema at once in our target SQLite database, using a method called :meth:`_schema.MetaData.create_all`: @@ -128,8 +130,8 @@ A lot just happened from that bit of Python code we wrote. For a complete overview of what's going on on with Table metadata, proceed in the Tutorial at :ref:`tutorial_working_with_metadata`. -Step Four - Create Objects and Persist -====================================== +Create Objects and Persist +--------------------------- We are now ready to insert data in the database. We accomplish this by creating instances of ``User`` and ``Address`` objects, which have @@ -199,8 +201,8 @@ Basics on creating a :class:`_orm.Session` are at Then, some varieties of basic persistence operations are introduced at :ref:`tutorial_inserting_orm`. 
-Step Five - Simple SELECT -========================== +Simple SELECT +-------------- With some rows in the database, here's the simplest form of emitting a SELECT statement to load some objects. To create SELECT statements, we use the @@ -237,8 +239,8 @@ SQL IN operator. More detail on how to select objects and individual columns is at :ref:`tutorial_selecting_orm_entities`. -Step Six - SELECT with JOIN -=========================== +SELECT with JOIN +----------------- It's very common to query amongst multiple tables at once, and in SQL the JOIN keyword is the primary way this happens. The :class:`_sql.Select` @@ -269,8 +271,8 @@ method :meth:`_sql.ColumnOperators.__eq__` to produce a SQL criteria object. Some more background on the concepts above are at :ref:`tutorial_select_where_clause` and :ref:`tutorial_select_join`. -Step Seven - Make Changes -========================= +Make Changes +------------ The :class:`_orm.Session` object, in conjunction with our ORM-mapped classes ``User`` and ``Address``, automatically track changes to the objects as they @@ -314,8 +316,8 @@ items using more or less SQL is introduced at :ref:`tutorial_orm_loader_strategi A detailed walkthrough on ORM data manipulation starts at :ref:`tutorial_orm_data_manipulation`. -Step Eight - Some Deletes -========================= +Some Deletes +------------ All things must come to an end, as is the case for some of our database rows - here's a quick demonstration of two different forms of deletion, both @@ -400,8 +402,8 @@ The Tutorial discusses ORM deletion at :ref:`tutorial_orm_deleting`. Background on object expiration is at :ref:`session_expiring`; cascades are discussed in depth at :ref:`unitofwork_cascades`. -Step Nine - Learn the above concepts in depth -============================================= +Learn the above concepts in depth +--------------------------------- For a new user, the above sections were likely a whirlwind tour. 
There's a lot of important concepts in each step above that weren't covered. With a From ecc2bbf136bad77f704f33b2f67428ca5f49ded6 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 11 Mar 2022 14:10:17 -0500 Subject: [PATCH 153/632] take quickstart out of main TOC for now looks kind of awkward and isn't reference Change-Id: I4fb664f79b792a32c6695a9cae7b1845a3044271 (cherry picked from commit d866eec2adcdfa2dd01e4c12d11cc2702ab2ff5e) --- doc/build/orm/index.rst | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/orm/index.rst b/doc/build/orm/index.rst index e37f472965a..9ec160e5cd4 100644 --- a/doc/build/orm/index.rst +++ b/doc/build/orm/index.rst @@ -12,7 +12,6 @@ tutorial. :maxdepth: 2 tutorial - quickstart mapper_config relationships loading_objects @@ -20,3 +19,8 @@ tutorial. extending extensions/index examples + +.. toctree:: + :hidden: + + quickstart From b13b7d36e1cbdaba60ac7eafbf1e1cd6e005056e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 11 Mar 2022 14:59:04 -0500 Subject: [PATCH 154/632] dispose session outside of child proc disposing inside the child proc can interfere with the parent process. we likely never considered this. Fixes: #7815 Change-Id: I6ad0e5840655ed99a9d30002eba280c8e44a5c2e (cherry picked from commit 936f0b2fc226171a81df3ca66c269750e7e52436) --- doc/build/core/pooling.rst | 41 ++++++++++++++++++++++++++++---------- 1 file changed, 30 insertions(+), 11 deletions(-) diff --git a/doc/build/core/pooling.rst b/doc/build/core/pooling.rst index 878a9ccab6f..91135bf5b09 100644 --- a/doc/build/core/pooling.rst +++ b/doc/build/core/pooling.rst @@ -479,19 +479,34 @@ are three general approaches to this: engine = create_engine("mysql://user:pass@host/dbname", poolclass=NullPool) -2. Call :meth:`_engine.Engine.dispose` on any given :class:`_engine.Engine` as - soon one is within the new process. 
In Python multiprocessing, constructs - such as ``multiprocessing.Pool`` include "initializer" hooks which are a - place that this can be performed; otherwise at the top of where - ``os.fork()`` or where the ``Process`` object begins the child fork, a - single call to :meth:`_engine.Engine.dispose` will ensure any remaining - connections are flushed. **This is the recommended approach**:: +2. Call :meth:`_engine.Engine.dispose` on any given :class:`_engine.Engine` + **directly before** the new process is started, so that the new process + will create new connections, as well as not attempt to close connections that + were shared from the parent which can impact the parent's subsequent + use of those connections. **This is the recommended approach**:: engine = create_engine("mysql://user:pass@host/dbname") def run_in_process(): - # process starts. ensure engine.dispose() is called just once - # at the beginning + with engine.connect() as conn: + conn.execute(text("...")) + + # before process starts, ensure engine.dispose() is called + engine.dispose() + p = Process(target=run_in_process) + p.start() + +3. Alternatively, if the :class:`_engine.Engine` is only to be used in + child processes, and will not be used from the parent process subsequent + to the creation of child forks, the dispose may be within the child process + right as it begins:: + + engine = create_engine("mysql+mysqldb://user:pass@host/dbname") + + def run_in_process(): + # process starts. ensure engine.dispose() is called just once + # at the beginning. note this cause parent process connections + # to be closed for most drivers engine.dispose() with engine.connect() as conn: @@ -500,10 +515,14 @@ are three general approaches to this: p = Process(target=run_in_process) p.start() -3. 
An event handler can be applied to the connection pool that tests for + # after child process starts, "engine" above should not be used within + # the parent process for connectivity, without calling + # engine.dispose() first + +4. An event handler can be applied to the connection pool that tests for connections being shared across process boundaries, and invalidates them. This approach, **when combined with an explicit call to dispose() as - mentioned above**, should cover all cases:: + mentioned above in options 2 or 3**, should cover all cases:: from sqlalchemy import event from sqlalchemy import exc From 45645cfe30049a14534e120aaffc1577486ab716 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 11 Mar 2022 16:01:09 -0500 Subject: [PATCH 155/632] further simplify pool-sharing doc 1. the event based approach doesn't require dispose() to be called at all, and the note that the "pool will hang" makes no sense. I have no idea what that refers towards 2. the subsequent paragraph about connections and sessions is unintelligible. old paragraphs like these are likely why people complain about the docs so much. try to just say "don't do this", as that is easier than trying to explain to use connection.invalidate() etc. Change-Id: Id840c65a2f71583ced4dc82fd8690e7da4c4b10e (cherry picked from commit b098d2a8b9c8a6fd1ddc5dce1eca7e70ace3f545) --- doc/build/core/pooling.rst | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/doc/build/core/pooling.rst b/doc/build/core/pooling.rst index 91135bf5b09..6b2735a5d48 100644 --- a/doc/build/core/pooling.rst +++ b/doc/build/core/pooling.rst @@ -520,9 +520,7 @@ are three general approaches to this: # engine.dispose() first 4. An event handler can be applied to the connection pool that tests for - connections being shared across process boundaries, and invalidates them. 
- This approach, **when combined with an explicit call to dispose() as - mentioned above in options 2 or 3**, should cover all cases:: + connections being shared across process boundaries, and invalidates them:: from sqlalchemy import event from sqlalchemy import exc @@ -550,22 +548,15 @@ are three general approaches to this: originated in a different parent process as an "invalid" connection, coercing the pool to recycle the connection record to make a new connection. - When using the above recipe, **ensure the dispose approach from #2 is also - used**, as if the connection pool is exhausted in the parent process - when the fork occurs, an empty pool will be copied into - the child process which will then hang because it has no connections. - The above strategies will accommodate the case of an :class:`_engine.Engine` -being shared among processes. However, for the case of a transaction-active -:class:`.Session` or :class:`_engine.Connection` being shared, there's no automatic -fix for this; an application needs to ensure a new child process only -initiate new :class:`_engine.Connection` objects and transactions, as well as ORM -:class:`.Session` objects. For a :class:`.Session` object, technically -this is only needed if the session is currently transaction-bound, however -the scope of a single :class:`.Session` is in any case intended to be -kept within a single call stack in any case (e.g. not a global object, not -shared between processes or threads). - +being shared among processes. The above steps alone are not sufficient for the +case of sharing a specific :class:`_engine.Connection` over a process boundary; +prefer to keep the scope of a particular :class:`_engine.Connection` local to a +single process (and thread). 
It's additionally not supported to share any kind +of ongoing transactional state directly across a process boundary, such as an +ORM :class:`_orm.Session` object that's begun a transaction and references +active :class:`_orm.Connection` instances; again prefer to create new +:class:`_orm.Session` objects in new processes. API Documentation - Available Pool Implementations From d7caa3c0bef64c710e45ed646756da65c4241bdd Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 11 Mar 2022 16:23:14 -0500 Subject: [PATCH 156/632] restore quickstart to toc this was coming out wrong due to the wrong header level on the page. it needs to be in a visible toc so that the inner elements show up on the sidebar / top mobile nav. Change-Id: I13acbe0d82c6a839230bc2e2454e4ab82e4879e6 (cherry picked from commit df056af49c51dcbcd70eb13ead5c3d8588c08235) --- doc/build/orm/index.rst | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/doc/build/orm/index.rst b/doc/build/orm/index.rst index 9ec160e5cd4..ee0eaf80547 100644 --- a/doc/build/orm/index.rst +++ b/doc/build/orm/index.rst @@ -11,6 +11,7 @@ tutorial. .. toctree:: :maxdepth: 2 + quickstart tutorial mapper_config relationships @@ -20,7 +21,3 @@ tutorial. extensions/index examples -.. 
toctree:: - :hidden: - - quickstart From bff4733872e4fc1ab6fc8a8a5b03d41d80fca575 Mon Sep 17 00:00:00 2001 From: Gord Thompson Date: Mon, 14 Mar 2022 11:26:54 -0600 Subject: [PATCH 157/632] Fix "Ambiguous column name" error on FK reflection Fixes: #7812 Change-Id: Ic16eff9a9201d34515cb8eb884270eced4e1196a (cherry picked from commit 84a78a97b5903dc246155a63ee8514385ed95d7c) --- doc/build/changelog/unreleased_14/7812.rst | 7 +++++++ lib/sqlalchemy/dialects/mssql/base.py | 3 ++- 2 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/7812.rst diff --git a/doc/build/changelog/unreleased_14/7812.rst b/doc/build/changelog/unreleased_14/7812.rst new file mode 100644 index 00000000000..7e28428acfa --- /dev/null +++ b/doc/build/changelog/unreleased_14/7812.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: bug, mssql, regression + :tickets: 7812 + + Fixed regression caused by :ticket:`7160` where FK reflection on a very + old database (compatibility level 80: SQL Server 2000) causes an + "Ambiguous column name" error. Patch courtesy of @Lin-Your. 
diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index 2006763b13b..40c06ff0080 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -3450,7 +3450,8 @@ def get_foreign_keys( AND index_info.index_name = fk_info.unique_constraint_name AND index_info.ordinal_position = fk_info.ordinal_position - ORDER BY constraint_schema, constraint_name, ordinal_position + ORDER BY fk_info.constraint_schema, fk_info.constraint_name, + fk_info.ordinal_position """ ) .bindparams( From bb39c1606d211c1b5bd5a9cf294f0bf834f7f26c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 16 Mar 2022 12:37:20 -0400 Subject: [PATCH 158/632] remove intermediary _is_clone_of entries when cloning Improvements in memory usage by the ORM, removing a significant set of intermediary expression objects that are typically stored when a copy of an expression object is created. These clones have been greatly reduced, reducing the number of total expression objects stored in memory by ORM mappings by about 30%. note this change causes the tests to have a bit of a harder time with GC, which we would assume is because mappings now have a lot more garbage to clean up after mappers are configured. it remains to be seen what the long term effects of this are. 
Fixes: #7823 Change-Id: If8729747ffb9bf27e8974f069a994b5a823ee095 (cherry picked from commit b8db80e9ea917e4770c78feff092044d386985c6) --- doc/build/changelog/unreleased_14/7823.rst | 9 +++++++ lib/sqlalchemy/sql/elements.py | 3 ++- lib/sqlalchemy/sql/selectable.py | 3 ++- test/aaa_profiling/test_memusage.py | 11 ++++++++ test/orm/test_hasparent.py | 29 ++++++++++++++++++++-- test/orm/test_session.py | 15 +++++++++++ 6 files changed, 66 insertions(+), 4 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7823.rst diff --git a/doc/build/changelog/unreleased_14/7823.rst b/doc/build/changelog/unreleased_14/7823.rst new file mode 100644 index 00000000000..249a749d027 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7823.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, orm, performance + :tickets: 7823 + + Improvements in memory usage by the ORM, removing a significant set of + intermediary expression objects that are typically stored when a copy of an + expression object is created. These clones have been greatly reduced, + reducing the number of total expression objects stored in memory by + ORM mappings by about 30%. diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index 0aab04139f1..fbb02d9258c 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -248,7 +248,8 @@ def _clone(self, **kw): # process leaves around a lot of remnants of the previous clause # typically in the form of column expressions still attached to the # old table. 
- c._is_clone_of = self + cc = self._is_clone_of + c._is_clone_of = cc if cc is not None else self return c diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index 51e0ae1578e..5516898a83f 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -4773,7 +4773,8 @@ class _MemoizedSelectEntities( def _clone(self, **kw): c = self.__class__.__new__(self.__class__) c.__dict__ = {k: v for k, v in self.__dict__.items()} - c._is_clone_of = self + + c._is_clone_of = self.__dict__.get("_is_clone_of", self) return c @classmethod diff --git a/test/aaa_profiling/test_memusage.py b/test/aaa_profiling/test_memusage.py index 624c12ea225..c842b593016 100644 --- a/test/aaa_profiling/test_memusage.py +++ b/test/aaa_profiling/test_memusage.py @@ -355,6 +355,17 @@ def go(): go() + def test_clone_expression(self): + + root_expr = column("x", Integer) == 12 + expr = [root_expr] + + @profile_memory() + def go(): + expr[0] = cloned_traverse(expr[0], {}, {}) + + go() + class MemUsageWBackendTest(fixtures.MappedTest, EnsureZeroed): diff --git a/test/orm/test_hasparent.py b/test/orm/test_hasparent.py index 425dd947d4c..8f61c11970d 100644 --- a/test/orm/test_hasparent.py +++ b/test/orm/test_hasparent.py @@ -1,5 +1,4 @@ """test the current state of the hasparent() flag.""" - from sqlalchemy import ForeignKey from sqlalchemy import Integer from sqlalchemy import testing @@ -26,6 +25,10 @@ class ParentRemovalTest(fixtures.MappedTest): run_inserts = None + # trying to push GC to do a better job + run_setup_classes = "each" + run_setup_mappers = "each" + @classmethod def define_tables(cls, metadata): if testing.against("oracle"): @@ -173,12 +176,24 @@ def test_stale_state_negative_child_expired(self): """ User = self.classes.User s, u1, a1 = self._fixture() + gc_collect() u2 = User(addresses=[a1]) # noqa s.expire(a1) u1.addresses.remove(a1) + u2_is = u2._sa_instance_state + del u2 + + for i in range(5): + gc_collect() + # heisenberg the 
GC a little bit, since #7823 caused a lot more + # GC when mappings are set up, larger test suite started failing + # on this being gc'ed + o = u2_is.obj() + assert o is None + # controversy here. The action is # to expire one object, not the other, and remove; # this is pretty abusive in any case. for now @@ -192,13 +207,23 @@ def test_stale_state_negative_child_expired(self): def test_stale_state_negative(self): User = self.classes.User s, u1, a1 = self._fixture() + gc_collect() u2 = User(addresses=[a1]) s.add(u2) s.flush() s._expunge_states([attributes.instance_state(u2)]) + + u2_is = u2._sa_instance_state del u2 - gc_collect() + + for i in range(5): + gc_collect() + # heisenberg the GC a little bit, since #7823 caused a lot more + # GC when mappings are set up, larger test suite started failing + # on this being gc'ed + o = u2_is.obj() + assert o is None assert_raises_message( orm_exc.StaleDataError, diff --git a/test/orm/test_session.py b/test/orm/test_session.py index 607c0a9edcd..295fd8205f3 100644 --- a/test/orm/test_session.py +++ b/test/orm/test_session.py @@ -1534,14 +1534,22 @@ def test_weakref(self): s = fixture_session() self.mapper_registry.map_imperatively(User, users) + gc_collect() s.add(User(name="ed")) s.flush() assert not s.dirty user = s.query(User).one() + + # heisenberg the GC a little bit, since #7823 caused a lot more + # GC when mappings are set up, larger test suite started failing + # on this being gc'ed + user_is = user._sa_instance_state del user gc_collect() + assert user_is.obj() is None + assert len(s.identity_map) == 0 user = s.query(User).one() @@ -1566,6 +1574,7 @@ def test_weakref_pickled(self): s = fixture_session() self.mapper_registry.map_imperatively(User, users) + gc_collect() s.add(User(name="ed")) s.flush() @@ -1608,6 +1617,8 @@ def test_weakref_with_cycles_o2m(self): properties={"addresses": relationship(Address, backref="user")}, ) self.mapper_registry.map_imperatively(Address, addresses) + gc_collect() + 
s.add(User(name="ed", addresses=[Address(email_address="ed1")])) s.commit() @@ -1648,6 +1659,8 @@ def test_weakref_with_cycles_o2o(self): }, ) self.mapper_registry.map_imperatively(Address, addresses) + gc_collect() + s.add(User(name="ed", address=Address(email_address="ed1"))) s.commit() @@ -1675,6 +1688,7 @@ def test_auto_detach_on_gc_session(self): users, User = self.tables.users, self.classes.User self.mapper_registry.map_imperatively(User, users) + gc_collect() sess = Session(testing.db) @@ -1706,6 +1720,7 @@ def test_fast_discard_race(self): users, User = self.tables.users, self.classes.User self.mapper_registry.map_imperatively(User, users) + gc_collect() sess = fixture_session() From 17781268fcf29b53693617c5be5ee8e11c6649c8 Mon Sep 17 00:00:00 2001 From: Gord Thompson Date: Mon, 14 Mar 2022 18:03:29 -0600 Subject: [PATCH 159/632] Git ignore lib/sqlalchemy/cyextension for 1.4 Change-Id: Ic5978375278ef6bd49d8ae15a7d6452db1870365 --- .gitignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index c566ded772b..01d436800c5 100644 --- a/.gitignore +++ b/.gitignore @@ -36,6 +36,7 @@ test/test_schema.db /.ipynb_checkpoints/ *.ipynb /querytest.db -/.mypy_cache /.pytest_cache /db_idents.txt +# items that only belong in the 2.0 branch +/lib/sqlalchemy/cyextension From 7a93686ae47aca374a6ed29fdf1ae6cb5451cc63 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 18 Mar 2022 10:33:40 -0400 Subject: [PATCH 160/632] catch unexpected errors when accessing clslevel attribute Improved the error message that's raised for the case where the :func:`.association_proxy` construct attempts to access a target attribute at the class level, and this access fails. The particular use case here is when proxying to a hybrid attribute that does not include a working class-level implementation. 
Fixes: #7827 Change-Id: Ic6ff9df010f49253e664a1e7c7e16d8546006965 (cherry picked from commit 764e36e5e7b7faf1a97b4b06be1ca307ac4fce46) --- doc/build/changelog/unreleased_14/7827.rst | 10 ++++++++++ lib/sqlalchemy/ext/associationproxy.py | 16 ++++++++++++++++ test/ext/test_associationproxy.py | 22 ++++++++++++++++++++++ 3 files changed, 48 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7827.rst diff --git a/doc/build/changelog/unreleased_14/7827.rst b/doc/build/changelog/unreleased_14/7827.rst new file mode 100644 index 00000000000..aedf25809d7 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7827.rst @@ -0,0 +1,10 @@ +.. change:: + :tags: bug, ext + :tickets: 7827 + + Improved the error message that's raised for the case where the + :func:`.association_proxy` construct attempts to access a target attribute + at the class level, and this access fails. The particular use case here is + when proxying to a hybrid attribute that does not include a working + class-level implementation. 
+ diff --git a/lib/sqlalchemy/ext/associationproxy.py b/lib/sqlalchemy/ext/associationproxy.py index 9a73bb5c2cd..fbf377afd44 100644 --- a/lib/sqlalchemy/ext/associationproxy.py +++ b/lib/sqlalchemy/ext/associationproxy.py @@ -383,6 +383,22 @@ def for_proxy(cls, parent, owning_class, parent_instance): return AmbiguousAssociationProxyInstance( parent, owning_class, target_class, value_attr ) + except Exception as err: + util.raise_( + exc.InvalidRequestError( + "Association proxy received an unexpected error when " + "trying to retreive attribute " + '"%s.%s" from ' + 'class "%s": %s' + % ( + target_class.__name__, + parent.value_attr, + target_class.__name__, + err, + ) + ), + from_=err, + ) else: return cls._construct_for_assoc( target_assoc, parent, owning_class, target_class, value_attr diff --git a/test/ext/test_associationproxy.py b/test/ext/test_associationproxy.py index 0b05fe0387e..44f3890de88 100644 --- a/test/ext/test_associationproxy.py +++ b/test/ext/test_associationproxy.py @@ -34,6 +34,7 @@ from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import is_false +from sqlalchemy.testing.assertions import expect_raises_message from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.mock import call from sqlalchemy.testing.mock import Mock @@ -3343,6 +3344,10 @@ class A(Base): b_data = association_proxy("bs", "value") well_behaved_b_data = association_proxy("bs", "well_behaved_value") + fails_on_class_access = association_proxy( + "bs", "fails_on_class_access" + ) + class B(Base): __tablename__ = "b" @@ -3386,6 +3391,10 @@ def well_behaved_w_expr(self, value): def well_behaved_w_expr(cls): return cast(cls.data, Integer) + @hybrid_property + def fails_on_class_access(self): + return len(self.data) + class C(Base): __tablename__ = "c" @@ -3394,6 +3403,19 @@ class C(Base): _b = relationship("B") attr = association_proxy("_b", "well_behaved_w_expr") + def test_msg_fails_on_cls_access(self): + 
A, B = self.classes("A", "B") + + a1 = A(bs=[B(data="b1")]) + + with expect_raises_message( + exc.InvalidRequestError, + "Association proxy received an unexpected error when trying to " + 'retreive attribute "B.fails_on_class_access" from ' + r'class "B": .* no len\(\)', + ): + a1.fails_on_class_access + def test_get_ambiguous(self): A, B = self.classes("A", "B") From 4373bd215878a2ec8216ade74007994ddc0a554f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 21 Mar 2022 09:07:51 -0400 Subject: [PATCH 161/632] add note that schema translate not supported per object Fixes: #7832 Change-Id: I241e4cbd1b5d6df46e49e29e4ab1cac021f2895c (cherry picked from commit 75a601e2fda7b849b858455eaa17d772bd2381e5) --- doc/build/core/connections.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/doc/build/core/connections.rst b/doc/build/core/connections.rst index 97191f5aaa6..aaf118b1bf4 100644 --- a/doc/build/core/connections.rst +++ b/doc/build/core/connections.rst @@ -941,6 +941,14 @@ as the schema name is passed to these methods explicitly. session = Session(schema_engine) + When using the ORM, the schema translate feature is only supported as + **a single schema translate map per Session**. It will **not work** if + different schema translate maps are given on a per-statement basis, as + the ORM :class:`_orm.Session` does not take current schema translate + values into account for individual objects. In other words, all + objects loaded in a particular :class:`_orm.Session` must be based on the + **same** ``schema_translate_map``. + ... 
From 8789171595507c2e43cd7ddc7dd4370b48130f4e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 21 Mar 2022 16:29:59 -0400 Subject: [PATCH 162/632] additional updates to stream results note Change-Id: I9d7ed9f412a2d9384f6c0b50316df79b6f8f481a (cherry picked from commit 512807f02d7aa6c4074910f1d0fba2187f50ee8f) --- doc/build/core/connections.rst | 19 +++++++++++-------- lib/sqlalchemy/orm/query.py | 12 ++++++++++-- 2 files changed, 21 insertions(+), 10 deletions(-) diff --git a/doc/build/core/connections.rst b/doc/build/core/connections.rst index aaf118b1bf4..dc065275736 100644 --- a/doc/build/core/connections.rst +++ b/doc/build/core/connections.rst @@ -941,16 +941,19 @@ as the schema name is passed to these methods explicitly. session = Session(schema_engine) - When using the ORM, the schema translate feature is only supported as - **a single schema translate map per Session**. It will **not work** if - different schema translate maps are given on a per-statement basis, as - the ORM :class:`_orm.Session` does not take current schema translate - values into account for individual objects. In other words, all - objects loaded in a particular :class:`_orm.Session` must be based on the - **same** ``schema_translate_map``. - ... + .. warning:: + + When using the ORM, the schema translate feature is only supported as + **a single schema translate map per Session**. It will **not work** if + different schema translate maps are given on a per-statement basis, as + the ORM :class:`_orm.Session` does not take current schema translate + values into account for individual objects. In other words, all + objects loaded in a particular :class:`_orm.Session` must be based on the + **same** ``schema_translate_map``. + + .. 
versionadded:: 1.1 diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index ab230f66f1d..fe06b531358 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -1557,12 +1557,20 @@ def execution_options(self, **kwargs): automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()` method or execution option is used. + .. versionadded:: 1.4 - added ORM options to + :meth:`_orm.Query.execution_options` + The execution options may also be specified on a per execution basis when using :term:`2.0 style` queries via the :paramref:`_orm.Session.execution_options` parameter. - .. versionadded:: 1.4 - added ORM options to - :meth:`_orm.Query.execution_options` + .. warning:: The + :paramref:`_engine.Connection.execution_options.stream_results` + parameter should not be used at the level of individual ORM + statement executions, as the :class:`_orm.Session` will not track + objects from different schema translate maps within a single + session. + .. seealso:: From 0b11385a0517ef9f63ce73b713d4d1e335c9addd Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 21 Mar 2022 17:35:26 -0400 Subject: [PATCH 163/632] note that horizontal sharding supports multi schema translates the horizontal sharding API needs some work as it is still exposing some legacy details, but in any case illustrate how we can, for the moment, to use multiple schema translate maps in a single session. 
A lot more cleanup is needed in horizontal sharding, see #7837 Change-Id: Ia925e2226ecee9d747a8c4fc1772917f10bc505f References: #7832 References: #7837 (cherry picked from commit 2a1afd32b82e103961f305b478d4cc6f6244cd00) --- doc/build/core/connections.rst | 11 +- examples/sharding/separate_databases.py | 26 +- .../sharding/separate_schema_translates.py | 243 ++++++++++++++++++ examples/sharding/separate_tables.py | 26 +- lib/sqlalchemy/orm/query.py | 3 +- 5 files changed, 277 insertions(+), 32 deletions(-) create mode 100644 examples/sharding/separate_schema_translates.py diff --git a/doc/build/core/connections.rst b/doc/build/core/connections.rst index dc065275736..6395c3c6b9c 100644 --- a/doc/build/core/connections.rst +++ b/doc/build/core/connections.rst @@ -945,15 +945,16 @@ as the schema name is passed to these methods explicitly. .. warning:: - When using the ORM, the schema translate feature is only supported as + When using the ORM :class:`_orm.Session` without extensions, the schema + translate feature is only supported as **a single schema translate map per Session**. It will **not work** if different schema translate maps are given on a per-statement basis, as the ORM :class:`_orm.Session` does not take current schema translate - values into account for individual objects. In other words, all - objects loaded in a particular :class:`_orm.Session` must be based on the - **same** ``schema_translate_map``. - + values into account for individual objects. + To use a single :class:`_orm.Session` with multiple ``schema_translate_map`` + configurations, the :ref:`horizontal_sharding_toplevel` extension may + be used. See the example at :ref:`examples_sharding`. .. 
versionadded:: 1.1 diff --git a/examples/sharding/separate_databases.py b/examples/sharding/separate_databases.py index 95f12fa722d..accbfd79b44 100644 --- a/examples/sharding/separate_databases.py +++ b/examples/sharding/separate_databases.py @@ -58,7 +58,7 @@ def id_generator(ctx): # in reality, might want to use a separate transaction for this. with db1.connect() as conn: nextid = conn.scalar(ids.select().with_for_update()) - conn.execute(ids.update(values={ids.c.nextid: ids.c.nextid + 1})) + conn.execute(ids.update().values({ids.c.nextid: ids.c.nextid + 1})) return nextid @@ -106,7 +106,7 @@ def __init__(self, temperature): # establish initial "id" in db1 with db1.begin() as conn: - conn.execute(ids.insert(), nextid=1) + conn.execute(ids.insert(), {"nextid": 1}) # step 5. define sharding functions. @@ -155,19 +155,19 @@ def id_chooser(query, ident): return ["north_america", "asia", "europe", "south_america"] -def query_chooser(query): - """query chooser. +def execute_chooser(context): + """statement execution chooser. - this also returns a list of shard ids, which can - just be all of them. but here we'll search into the Query in order - to try to narrow down the list of shards to query. + this also returns a list of shard ids, which can just be all of them. but + here we'll search into the execution context in order to try to narrow down + the list of shards to SELECT. """ ids = [] # we'll grab continent names as we find them # and convert to shard ids - for column, operator, value in _get_query_comparisons(query): + for column, operator, value in _get_select_comparisons(context.statement): # "shares_lineage()" returns True if both columns refer to the same # statement column, adjusting for any annotations present. # (an annotation is an internal clone of a Column object @@ -186,8 +186,8 @@ def query_chooser(query): return ids -def _get_query_comparisons(query): - """Search an orm.Query object for binary expressions. 
+def _get_select_comparisons(statement): + """Search a Select or Query object for binary expressions. Returns expressions which match a Column against one or more literal values as a list of tuples of the form @@ -222,9 +222,9 @@ def visit_binary(binary): # here we will traverse through the query's criterion, searching # for SQL constructs. We will place simple column comparisons # into a list. - if query.whereclause is not None: + if statement.whereclause is not None: visitors.traverse( - query.whereclause, + statement.whereclause, {}, { "bindparam": visit_bindparam, @@ -239,7 +239,7 @@ def visit_binary(binary): Session.configure( shard_chooser=shard_chooser, id_chooser=id_chooser, - query_chooser=query_chooser, + execute_chooser=execute_chooser, ) # save and load objects! diff --git a/examples/sharding/separate_schema_translates.py b/examples/sharding/separate_schema_translates.py new file mode 100644 index 00000000000..c4f2b9e25ce --- /dev/null +++ b/examples/sharding/separate_schema_translates.py @@ -0,0 +1,243 @@ +"""Illustrates sharding using a single database with multiple schemas, +where a different "schema_translates_map" can be used for each shard. + +In this example we will set a "shard id" at all times. 
+ +""" +import datetime +import os + +from sqlalchemy import Column +from sqlalchemy import create_engine +from sqlalchemy import DateTime +from sqlalchemy import Float +from sqlalchemy import ForeignKey +from sqlalchemy import inspect +from sqlalchemy import Integer +from sqlalchemy import select +from sqlalchemy import String +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.ext.horizontal_shard import ShardedSession +from sqlalchemy.orm import relationship +from sqlalchemy.orm import sessionmaker + + +echo = True +engine = create_engine("sqlite://", echo=echo) + + +with engine.connect() as conn: + # use attached databases on sqlite to get "schemas" + for i in range(1, 5): + if os.path.exists("schema_%s.db" % i): + os.remove("schema_%s.db" % i) + conn.exec_driver_sql( + 'ATTACH DATABASE "schema_%s.db" AS schema_%s' % (i, i) + ) + +db1 = engine.execution_options(schema_translate_map={None: "schema_1"}) +db2 = engine.execution_options(schema_translate_map={None: "schema_2"}) +db3 = engine.execution_options(schema_translate_map={None: "schema_3"}) +db4 = engine.execution_options(schema_translate_map={None: "schema_4"}) + + +# create session function. this binds the shard ids +# to databases within a ShardedSession and returns it. +Session = sessionmaker( + class_=ShardedSession, + future=True, + shards={ + "north_america": db1, + "asia": db2, + "europe": db3, + "south_america": db4, + }, +) + + +# mappings and tables +Base = declarative_base() + + +# table setup. we'll store a lead table of continents/cities, and a secondary +# table storing locations. a particular row will be placed in the database +# whose shard id corresponds to the 'continent'. in this setup, secondary rows +# in 'weather_reports' will be placed in the same DB as that of the parent, but +# this can be changed if you're willing to write more complex sharding +# functions. 
+ + +class WeatherLocation(Base): + __tablename__ = "weather_locations" + + id = Column(Integer, primary_key=True) + continent = Column(String(30), nullable=False) + city = Column(String(50), nullable=False) + + reports = relationship("Report", backref="location") + + def __init__(self, continent, city): + self.continent = continent + self.city = city + + +class Report(Base): + __tablename__ = "weather_reports" + + id = Column(Integer, primary_key=True) + location_id = Column( + "location_id", Integer, ForeignKey("weather_locations.id") + ) + temperature = Column("temperature", Float) + report_time = Column( + "report_time", DateTime, default=datetime.datetime.now + ) + + def __init__(self, temperature): + self.temperature = temperature + + +# create tables +for db in (db1, db2, db3, db4): + Base.metadata.create_all(db) + + +# step 5. define sharding functions. + +# we'll use a straight mapping of a particular set of "country" +# attributes to shard id. +shard_lookup = { + "North America": "north_america", + "Asia": "asia", + "Europe": "europe", + "South America": "south_america", +} + + +def shard_chooser(mapper, instance, clause=None): + """shard chooser. + + this is primarily invoked at persistence time. + + looks at the given instance and returns a shard id + note that we need to define conditions for + the WeatherLocation class, as well as our secondary Report class which will + point back to its WeatherLocation via its 'location' attribute. + + """ + if isinstance(instance, WeatherLocation): + return shard_lookup[instance.continent] + else: + return shard_chooser(mapper, instance.location) + + +def id_chooser(query, ident): + """id chooser. + + given a primary key identity and a legacy :class:`_orm.Query`, + return which shard we should look at. + + in this case, we only want to support this for lazy-loaded items; + any primary query should have shard id set up front. 
+ + """ + if query.lazy_loaded_from: + # if we are in a lazy load, we can look at the parent object + # and limit our search to that same shard, assuming that's how we've + # set things up. + return [query.lazy_loaded_from.identity_token] + else: + raise NotImplementedError() + + +def execute_chooser(context): + """statement execution chooser. + + given an :class:`.ORMExecuteState` for a statement, return a list + of shards we should consult. + + As before, we want a "shard_id" execution option to be present. + Otherwise, this would be a lazy load from a parent object where we + will look for the previous token. + + """ + if context.lazy_loaded_from: + return [context.lazy_loaded_from.identity_token] + else: + return [context.execution_options["shard_id"]] + + +# configure shard chooser +Session.configure( + shard_chooser=shard_chooser, + id_chooser=id_chooser, + execute_chooser=execute_chooser, +) + +# save and load objects! + +tokyo = WeatherLocation("Asia", "Tokyo") +newyork = WeatherLocation("North America", "New York") +toronto = WeatherLocation("North America", "Toronto") +london = WeatherLocation("Europe", "London") +dublin = WeatherLocation("Europe", "Dublin") +brasilia = WeatherLocation("South America", "Brasila") +quito = WeatherLocation("South America", "Quito") + +tokyo.reports.append(Report(80.0)) +newyork.reports.append(Report(75)) +quito.reports.append(Report(85)) + +with Session() as sess: + + sess.add_all([tokyo, newyork, toronto, london, dublin, brasilia, quito]) + + sess.commit() + + t = sess.get( + WeatherLocation, + tokyo.id, + # for session.get(), we currently need to use identity_token. 
+ # the horizontal sharding API does not yet pass through the + # execution options + identity_token="asia", + # future version + # execution_options={"shard_id": "asia"} + ) + assert t.city == tokyo.city + assert t.reports[0].temperature == 80.0 + + north_american_cities = sess.execute( + select(WeatherLocation).filter( + WeatherLocation.continent == "North America" + ), + execution_options={"shard_id": "north_america"}, + ).scalars() + + assert {c.city for c in north_american_cities} == {"New York", "Toronto"} + + europe = sess.execute( + select(WeatherLocation).filter(WeatherLocation.continent == "Europe"), + execution_options={"shard_id": "europe"}, + ).scalars() + + assert {c.city for c in europe} == {"London", "Dublin"} + + # the Report class uses a simple integer primary key. So across two + # databases, a primary key will be repeated. The "identity_token" tracks + # in memory that these two identical primary keys are local to different + # databases. + newyork_report = newyork.reports[0] + tokyo_report = tokyo.reports[0] + + assert inspect(newyork_report).identity_key == ( + Report, + (1,), + "north_america", + ) + assert inspect(tokyo_report).identity_key == (Report, (1,), "asia") + + # the token representing the originating shard is also available directly + + assert inspect(newyork_report).identity_token == "north_america" + assert inspect(tokyo_report).identity_token == "asia" diff --git a/examples/sharding/separate_tables.py b/examples/sharding/separate_tables.py index f24dde288d0..4314e223379 100644 --- a/examples/sharding/separate_tables.py +++ b/examples/sharding/separate_tables.py @@ -72,7 +72,7 @@ def id_generator(ctx): # in reality, might want to use a separate transaction for this. 
with engine.connect() as conn: nextid = conn.scalar(ids.select().with_for_update()) - conn.execute(ids.update(values={ids.c.nextid: ids.c.nextid + 1})) + conn.execute(ids.update().values({ids.c.nextid: ids.c.nextid + 1})) return nextid @@ -120,7 +120,7 @@ def __init__(self, temperature): # establish initial "id" in db1 with db1.begin() as conn: - conn.execute(ids.insert(), nextid=1) + conn.execute(ids.insert(), {"nextid": 1}) # step 5. define sharding functions. @@ -169,19 +169,19 @@ def id_chooser(query, ident): return ["north_america", "asia", "europe", "south_america"] -def query_chooser(query): - """query chooser. +def execute_chooser(context): + """statement execution chooser. - this also returns a list of shard ids, which can - just be all of them. but here we'll search into the Query in order - to try to narrow down the list of shards to query. + this also returns a list of shard ids, which can just be all of them. but + here we'll search into the execution context in order to try to narrow down + the list of shards to SELECT. """ ids = [] # we'll grab continent names as we find them # and convert to shard ids - for column, operator, value in _get_query_comparisons(query): + for column, operator, value in _get_select_comparisons(context.statement): # "shares_lineage()" returns True if both columns refer to the same # statement column, adjusting for any annotations present. # (an annotation is an internal clone of a Column object @@ -200,8 +200,8 @@ def query_chooser(query): return ids -def _get_query_comparisons(query): - """Search an orm.Query object for binary expressions. +def _get_select_comparisons(statement): + """Search a Select or Query object for binary expressions. Returns expressions which match a Column against one or more literal values as a list of tuples of the form @@ -236,9 +236,9 @@ def visit_binary(binary): # here we will traverse through the query's criterion, searching # for SQL constructs. 
We will place simple column comparisons # into a list. - if query.whereclause is not None: + if statement.whereclause is not None: visitors.traverse( - query.whereclause, + statement.whereclause, {}, { "bindparam": visit_bindparam, @@ -253,7 +253,7 @@ def visit_binary(binary): Session.configure( shard_chooser=shard_chooser, id_chooser=id_chooser, - query_chooser=query_chooser, + execute_chooser=execute_chooser, ) # save and load objects! diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index fe06b531358..9378bc3a2a6 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -1569,7 +1569,8 @@ def execution_options(self, **kwargs): parameter should not be used at the level of individual ORM statement executions, as the :class:`_orm.Session` will not track objects from different schema translate maps within a single - session. + session. For multiple schema translate maps within the scope of a + single :class:`_orm.Session`, see :ref:`examples_sharding`. .. seealso:: From 714557378324f656bcf08cff37d6fad34f3ed646 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 21 Mar 2022 17:47:34 -0400 Subject: [PATCH 164/632] use begin() for setting up first PK Change-Id: I227bbb46fbcbae1f60d3f5bb4dd2b9f41ca3dd0c (cherry picked from commit 257f67b180cadf4ca0df0d1facf27d55c98df676) --- examples/sharding/separate_databases.py | 2 +- examples/sharding/separate_tables.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/sharding/separate_databases.py b/examples/sharding/separate_databases.py index accbfd79b44..9818656c3c5 100644 --- a/examples/sharding/separate_databases.py +++ b/examples/sharding/separate_databases.py @@ -56,7 +56,7 @@ def id_generator(ctx): # in reality, might want to use a separate transaction for this. 
- with db1.connect() as conn: + with db1.begin() as conn: nextid = conn.scalar(ids.select().with_for_update()) conn.execute(ids.update().values({ids.c.nextid: ids.c.nextid + 1})) return nextid diff --git a/examples/sharding/separate_tables.py b/examples/sharding/separate_tables.py index 4314e223379..0f6e2ffd830 100644 --- a/examples/sharding/separate_tables.py +++ b/examples/sharding/separate_tables.py @@ -70,7 +70,7 @@ def before_cursor_execute( def id_generator(ctx): # in reality, might want to use a separate transaction for this. - with engine.connect() as conn: + with engine.begin() as conn: nextid = conn.scalar(ids.select().with_for_update()) conn.execute(ids.update().values({ids.c.nextid: ids.c.nextid + 1})) return nextid From e23ae010f594042db8b7d2a798af6e7ec8bf8ee9 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 22 Mar 2022 14:02:04 -0400 Subject: [PATCH 165/632] clarify LIMIT/ORDER BY FAQ wording this will be coming to main in a related patch. scale back the language here as we have a lot of examples that use limit without order by in order to retrieve an arbitrary row. If subqueryload is not being used, there's nothing inherently wrong with this. Change-Id: I73a37658328f46a2e48b3e467f46e324f1d6a5e8 --- doc/build/faq/ormconfiguration.rst | 33 ++++++++++++++---------------- 1 file changed, 15 insertions(+), 18 deletions(-) diff --git a/doc/build/faq/ormconfiguration.rst b/doc/build/faq/ormconfiguration.rst index 3eab2185471..f257f7ce998 100644 --- a/doc/build/faq/ormconfiguration.rst +++ b/doc/build/faq/ormconfiguration.rst @@ -234,25 +234,22 @@ The same idea applies to all the other arguments, such as ``foreign_keys``:: .. _faq_subqueryload_limit_sort: -Why is ``ORDER BY`` required with ``LIMIT`` (especially with ``subqueryload()``)? ---------------------------------------------------------------------------------- - -A relational database can return rows in any -arbitrary order, when an explicit ordering is not set. 
-While this ordering very often corresponds to the natural -order of rows within a table, this is not the case for all databases and -all queries. The consequence of this is that any query that limits rows -using ``LIMIT`` or ``OFFSET`` should **always** specify an ``ORDER BY``. -Otherwise, it is not deterministic which rows will actually be returned. - -When we use a SQLAlchemy method like :meth:`_query.Query.first`, we are in fact -applying a ``LIMIT`` of one to the query, so without an explicit ordering -it is not deterministic what row we actually get back. +Why is ``ORDER BY`` recommended with ``LIMIT`` (especially with ``subqueryload()``)? +------------------------------------------------------------------------------------ + +When ORDER BY is not used for a SELECT statement that returns rows, the +relational database is free to returned matched rows in any arbitrary +order. While this ordering very often corresponds to the natural +order of rows within a table, this is not the case for all databases and all +queries. The consequence of this is that any query that limits rows using +``LIMIT`` or ``OFFSET``, or which merely selects the first row of the result, +discarding the rest, will not be deterministic in terms of what result row is +returned, assuming there's more than one row that matches the query's criteria. + While we may not notice this for simple queries on databases that usually -returns rows in their natural -order, it becomes much more of an issue if we also use :func:`_orm.subqueryload` -to load related collections, and we may not be loading the collections -as intended. +returns rows in their natural order, it becomes more of an issue if we +also use :func:`_orm.subqueryload` to load related collections, and we may not +be loading the collections as intended. SQLAlchemy implements :func:`_orm.subqueryload` by issuing a separate query, the results of which are matched up to the results from the first query. 
From 845aacd5fc8fdc7839597c0e75c447180f7b438d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 23 Mar 2022 10:09:18 -0400 Subject: [PATCH 166/632] fix generate series example this just drove me nuts because it didn't include render_derived(), doesn't run on PG as given Change-Id: I5d39336231c97b6cd5477644a718282709db2e1f (cherry picked from commit c565c470517e1cc70a7f33d1ad3d3256935f1121) --- lib/sqlalchemy/dialects/postgresql/base.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index c80f6ec63c6..37b2113e989 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -1182,11 +1182,14 @@ def update(): >>> from sqlalchemy import select, func >>> stmt = select( - ... func.generate_series(4, 1, -1).table_valued("value", with_ordinality="ordinality") + ... func.generate_series(4, 1, -1). + ... table_valued("value", with_ordinality="ordinality"). + ... render_derived() ... ) >>> print(stmt) SELECT anon_1.value, anon_1.ordinality - FROM generate_series(:generate_series_1, :generate_series_2, :generate_series_3) WITH ORDINALITY AS anon_1 + FROM generate_series(:generate_series_1, :generate_series_2, :generate_series_3) + WITH ORDINALITY AS anon_1(value, ordinality) .. versionadded:: 1.4.0b2 From 58fad097209e37d727988d03a60367b6e7dbc917 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 23 Mar 2022 10:07:13 -0400 Subject: [PATCH 167/632] Add option to disable from linting for table valued function Added new parameter :paramref:`.FunctionElement.table_valued.joins_implicitly`, for the :meth:`.FunctionElement.table_valued` construct. This parameter indicates that the given table-valued function implicitly joins to the table it refers towards, essentially disabling the "from linting" feature, i.e. the "cartesian product" warning, from taking effect due to the presence of this parameter. 
May be used for functions such as ``func.json_each()``. Fixes: #7845 Change-Id: I80edcb74efbd4417172132c0db4d9c756fdd5eae (cherry picked from commit 04dcc5c704dbf0b22705523e263e512c24936175) --- doc/build/changelog/unreleased_14/7845.rst | 11 ++++++++ lib/sqlalchemy/sql/compiler.py | 2 ++ lib/sqlalchemy/sql/functions.py | 29 +++++++++++++++++++--- lib/sqlalchemy/sql/selectable.py | 10 +++++++- test/sql/test_from_linter.py | 28 +++++++++++++++++++++ 5 files changed, 76 insertions(+), 4 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7845.rst diff --git a/doc/build/changelog/unreleased_14/7845.rst b/doc/build/changelog/unreleased_14/7845.rst new file mode 100644 index 00000000000..1cfa9cdf6bb --- /dev/null +++ b/doc/build/changelog/unreleased_14/7845.rst @@ -0,0 +1,11 @@ +.. change:: + :tags: usecase, sql + :tickets: 7845 + + Added new parameter + :paramref:`.FunctionElement.table_valued.joins_implicitly`, for the + :meth:`.FunctionElement.table_valued` construct. This parameter indicates + that the given table-valued function implicitly joins to the table it + refers towards, essentially disabling the "from linting" feature, i.e. the + "cartesian product" warning, from taking effect due to the presence of this + parameter. May be used for functions such as ``func.json_each()``. 
diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 7780d3782a4..671ca674924 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -2805,6 +2805,8 @@ def visit_cte( return self.preparer.format_alias(cte, cte_name) def visit_table_valued_alias(self, element, **kw): + if element.joins_implicitly: + kw["from_linter"] = None if element._is_lateral: return self.visit_lateral(element, **kw) else: diff --git a/lib/sqlalchemy/sql/functions.py b/lib/sqlalchemy/sql/functions.py index 8c07bc06699..e0ff1655f9f 100644 --- a/lib/sqlalchemy/sql/functions.py +++ b/lib/sqlalchemy/sql/functions.py @@ -212,8 +212,16 @@ def table_valued(self, *expr, **kw): string name will be added as a column to the .c collection of the resulting :class:`_sql.TableValuedAlias`. + :param joins_implicitly: when True, the table valued function may be + used in the FROM clause without any explicit JOIN to other tables + in the SQL query, and no "cartesian product" warning will be generated. + May be useful for SQL functions such as ``func.json_each()``. + + .. versionadded:: 1.4.33 + .. versionadded:: 1.4.0b2 + .. 
seealso:: :ref:`tutorial_functions_table_valued` - in the :ref:`unified_tutorial` @@ -234,6 +242,7 @@ def table_valued(self, *expr, **kw): new_func = self._generate() with_ordinality = kw.pop("with_ordinality", None) + joins_implicitly = kw.pop("joins_implicitly", None) name = kw.pop("name", None) if with_ordinality: @@ -244,7 +253,7 @@ def table_valued(self, *expr, **kw): *expr ) - return new_func.alias(name=name) + return new_func.alias(name=name, joins_implicitly=joins_implicitly) def column_valued(self, name=None): """Return this :class:`_functions.FunctionElement` as a column expression that @@ -497,7 +506,7 @@ def within_group_type(self, within_group): return None - def alias(self, name=None): + def alias(self, name=None, joins_implicitly=False): r"""Produce a :class:`_expression.Alias` construct against this :class:`.FunctionElement`. @@ -539,6 +548,17 @@ def alias(self, name=None): .. versionadded:: 1.4.0b2 Added the ``.column`` accessor + :param name: alias name, will be rendered as ``AS `` in the + FROM clause + + :param joins_implicitly: when True, the table valued function may be + used in the FROM clause without any explicit JOIN to other tables + in the SQL query, and no "cartesian product" warning will be + generated. May be useful for SQL functions such as + ``func.json_each()``. + + .. versionadded:: 1.4.33 + .. 
seealso:: :ref:`tutorial_functions_table_valued` - @@ -554,7 +574,10 @@ def alias(self, name=None): """ return TableValuedAlias._construct( - self, name, table_value_type=self.type + self, + name, + table_value_type=self.type, + joins_implicitly=joins_implicitly, ) def select(self): diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index 5516898a83f..125c3724b82 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -1764,6 +1764,7 @@ class TableValuedAlias(Alias): _supports_derived_columns = True _render_derived = False _render_derived_w_types = False + joins_implicitly = False _traverse_internals = [ ("element", InternalTraversal.dp_clauseelement), @@ -1773,9 +1774,16 @@ class TableValuedAlias(Alias): ("_render_derived_w_types", InternalTraversal.dp_boolean), ] - def _init(self, selectable, name=None, table_value_type=None): + def _init( + self, + selectable, + name=None, + table_value_type=None, + joins_implicitly=False, + ): super(TableValuedAlias, self)._init(selectable, name=name) + self.joins_implicitly = joins_implicitly self._tableval_type = ( type_api.TABLEVALUE if table_value_type is None diff --git a/test/sql/test_from_linter.py b/test/sql/test_from_linter.py index a2291386852..4a4d907f965 100644 --- a/test/sql/test_from_linter.py +++ b/test/sql/test_from_linter.py @@ -1,6 +1,10 @@ +from sqlalchemy import column +from sqlalchemy import func from sqlalchemy import Integer +from sqlalchemy import JSON from sqlalchemy import select from sqlalchemy import sql +from sqlalchemy import table from sqlalchemy import testing from sqlalchemy import true from sqlalchemy.testing import config @@ -161,6 +165,30 @@ def test_lateral_subqueries_ok_do_we_still_find_cartesians(self): assert start is p3 assert froms == {p1} + @testing.combinations(True, False, argnames="joins_implicitly") + def test_table_valued(self, joins_implicitly): + """test #7845""" + my_table = table( + "tbl", + column("id", Integer), + 
column("data", JSON()), + ) + + sub_dict = my_table.c.data["d"] + tv = func.json_each(sub_dict).table_valued( + "key", joins_implicitly=joins_implicitly + ) + has_key = tv.c.key == "f" + stmt = select(my_table.c.id).where(has_key) + froms, start = find_unmatching_froms(stmt, my_table) + + if joins_implicitly: + is_(start, None) + is_(froms, None) + else: + assert start == my_table + assert froms == {tv} + def test_count_non_eq_comparison_operators(self): query = select(self.a).where(self.a.c.col_a > self.b.c.col_b) froms, start = find_unmatching_froms(query, self.a) From 548e41ec5df96435d7cf4d0f7cde4cc7cd4e2f61 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 22 Mar 2022 20:14:04 -0400 Subject: [PATCH 168/632] trust user PK argument as given; don't reduce Fixed issue where the :class:`_orm.Mapper` would reduce a user-defined :paramref:`_orm.Mapper.primary_key` argument too aggressively, in the case of mapping to a ``UNION`` where for some of the SELECT entries, two columns are essentially equivalent, but in another, they are not, such as in a recursive CTE. The logic here has been changed to accept a given user-defined PK as given, where columns will be related to the mapped selectable but no longer "reduced" as this heuristic can't accommodate for all situations. Fixes: #7842 Change-Id: Ie46f0a3d42cae0501641fa213da0a9d5ca26c3ad (cherry picked from commit d051645463b169bf1535459653eff247cb772e62) --- doc/build/changelog/unreleased_14/7842.rst | 12 +++++ lib/sqlalchemy/orm/mapper.py | 18 +++---- test/orm/test_mapper.py | 61 ++++++++++++++++++++++ 3 files changed, 82 insertions(+), 9 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7842.rst diff --git a/doc/build/changelog/unreleased_14/7842.rst b/doc/build/changelog/unreleased_14/7842.rst new file mode 100644 index 00000000000..c165ed44bff --- /dev/null +++ b/doc/build/changelog/unreleased_14/7842.rst @@ -0,0 +1,12 @@ +.. 
change:: + :tags: bug, orm + :tickets: 7842 + + Fixed issue where the :class:`_orm.Mapper` would reduce a user-defined + :paramref:`_orm.Mapper.primary_key` argument too aggressively, in the case + of mapping to a ``UNION`` where for some of the SELECT entries, two columns + are essentially equivalent, but in another, they are not, such as in a + recursive CTE. The logic here has been changed to accept a given + user-defined PK as given, where columns will be related to the mapped + selectable but no longer "reduced" as this heuristic can't accommodate for + all situations. diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index 967c1064ad3..b4130b91941 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -1376,17 +1376,17 @@ def _configure_pks(self): # that of the inheriting (unless concrete or explicit) self.primary_key = self.inherits.primary_key else: - # determine primary key from argument or persist_selectable pks - - # reduce to the minimal set of columns + # determine primary key from argument or persist_selectable pks if self._primary_key_argument: - primary_key = sql_util.reduce_columns( - [ - self.persist_selectable.corresponding_column(c) - for c in self._primary_key_argument - ], - ignore_nonexistent_tables=True, - ) + primary_key = [ + self.persist_selectable.corresponding_column(c) + for c in self._primary_key_argument + ] else: + # if heuristically determined PKs, reduce to the minimal set + # of columns by eliminating FK->PK pairs for a multi-table + # expression. 
May over-reduce for some kinds of UNIONs + # / CTEs; use explicit PK argument for these special cases primary_key = sql_util.reduce_columns( self._pks_by_table[self.persist_selectable], ignore_nonexistent_tables=True, diff --git a/test/orm/test_mapper.py b/test/orm/test_mapper.py index fa837c678f7..9e2a7f63a66 100644 --- a/test/orm/test_mapper.py +++ b/test/orm/test_mapper.py @@ -5,6 +5,7 @@ from sqlalchemy import ForeignKey from sqlalchemy import func from sqlalchemy import Integer +from sqlalchemy import literal from sqlalchemy import MetaData from sqlalchemy import select from sqlalchemy import String @@ -43,6 +44,7 @@ from sqlalchemy.testing import is_false from sqlalchemy.testing import is_true from sqlalchemy.testing import ne_ +from sqlalchemy.testing.fixtures import ComparableEntity from sqlalchemy.testing.fixtures import ComparableMixin from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.schema import Column @@ -1416,6 +1418,65 @@ def test_mapping_to_outerjoin_no_partial_pks(self): ], ) + @testing.requires.ctes + def test_mapping_to_union_dont_overlimit_pk(self, registry, connection): + """test #7842""" + Base = registry.generate_base() + + class Node(Base): + __tablename__ = "cte_nodes" + + id = Column(Integer, primary_key=True) + parent = Column(Integer, ForeignKey("cte_nodes.id")) + + # so we dont have to deal with NULLS FIRST + sort_key = Column(Integer) + + class NodeRel(ComparableEntity, Base): + table = select( + Node.id, Node.parent, Node.sort_key, literal(0).label("depth") + ).cte(recursive=True) + __table__ = table.union_all( + select( + Node.id, + table.c.parent, + table.c.sort_key, + table.c.depth + literal(1), + ) + .select_from(Node) + .join(table, Node.parent == table.c.id) + ) + + __mapper_args__ = { + "primary_key": (__table__.c.id, __table__.c.parent) + } + + nt = NodeRel.__table__ + + eq_(NodeRel.__mapper__.primary_key, (nt.c.id, nt.c.parent)) + + registry.metadata.create_all(connection) + with 
Session(connection) as session: + n1, n2, n3, n4 = ( + Node(id=1, sort_key=1), + Node(id=2, parent=1, sort_key=2), + Node(id=3, parent=2, sort_key=3), + Node(id=4, parent=3, sort_key=4), + ) + session.add_all([n1, n2, n3, n4]) + session.commit() + + q_rel = select(NodeRel).filter_by(id=4).order_by(NodeRel.sort_key) + eq_( + session.scalars(q_rel).all(), + [ + NodeRel(id=4, parent=None), + NodeRel(id=4, parent=1), + NodeRel(id=4, parent=2), + NodeRel(id=4, parent=3), + ], + ) + def test_scalar_pk_arg(self): users, Keyword, items, Item, User, keywords = ( self.tables.users, From 4ccf8b72147496a033ef0c517bcf7345c516b3e8 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 24 Mar 2022 13:58:20 -0400 Subject: [PATCH 169/632] more autocommit messaging Further clarified connection-level logging to indicate the BEGIN, ROLLBACK and COMMIT log messages do not actually indicate a real transaction when the AUTOCOMMIT isolation level is in use; messaging has been extended to include the BEGIN message itself, and the messaging has also been fixed to accommodate when the :class:`.Engine` level :paramref:`.create_engine.isolation_level` parameter was used directly. Fixes: #7853 Change-Id: Iafc78070737ad117f84262e4bde84b81a81e4ea1 (cherry picked from commit 56366924673f88e51c74d94058c11132a057ecfa) --- doc/build/changelog/unreleased_14/7853.rst | 10 +++++ lib/sqlalchemy/engine/base.py | 27 +++++++++---- test/engine/test_logging.py | 45 +++++++++++++++++++++- 3 files changed, 72 insertions(+), 10 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7853.rst diff --git a/doc/build/changelog/unreleased_14/7853.rst b/doc/build/changelog/unreleased_14/7853.rst new file mode 100644 index 00000000000..66856c29e03 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7853.rst @@ -0,0 +1,10 @@ +.. 
change:: + :tags: bug, engine + :tickets: 7853 + + Further clarified connection-level logging to indicate the BEGIN, ROLLBACK + and COMMIT log messages do not actually indicate a real transaction when + the AUTOCOMMIT isolation level is in use; messaging has been extended to + include the BEGIN message itself, and the messaging has also been fixed to + accommodate when the :class:`.Engine` level + :paramref:`.create_engine.isolation_level` parameter was used directly. diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index cf6a14728b1..b5a3096e5bb 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -905,10 +905,15 @@ def in_nested_transaction(self): and self._nested_transaction.is_active ) - def _is_autocommit(self): - return ( - self._execution_options.get("isolation_level", None) - == "AUTOCOMMIT" + def _is_autocommit_isolation(self): + opt_iso = self._execution_options.get("isolation_level", None) + return bool( + opt_iso == "AUTOCOMMIT" + or ( + opt_iso is None + and getattr(self.engine.dialect, "isolation_level", None) + == "AUTOCOMMIT" + ) ) def get_transaction(self): @@ -939,7 +944,13 @@ def _begin_impl(self, transaction): assert not self.__branch_from if self._echo: - self._log_info("BEGIN (implicit)") + if self._is_autocommit_isolation(): + self._log_info( + "BEGIN (implicit; DBAPI should not BEGIN due to " + "autocommit mode)" + ) + else: + self._log_info("BEGIN (implicit)") self.__in_begin = True @@ -961,7 +972,7 @@ def _rollback_impl(self): if self._still_open_and_dbapi_connection_is_valid: if self._echo: - if self._is_autocommit(): + if self._is_autocommit_isolation(): self._log_info( "ROLLBACK using DBAPI connection.rollback(), " "DBAPI should ignore due to autocommit mode" @@ -980,7 +991,7 @@ def _commit_impl(self, autocommit=False): # if a connection has this set as the isolation level, we can skip # the "autocommit" warning as the operation will do "autocommit" # in any case - if autocommit and 
not self._is_autocommit(): + if autocommit and not self._is_autocommit_isolation(): util.warn_deprecated_20( "The current statement is being autocommitted using " "implicit autocommit, which will be removed in " @@ -993,7 +1004,7 @@ def _commit_impl(self, autocommit=False): self.dispatch.commit(self) if self._echo: - if self._is_autocommit(): + if self._is_autocommit_isolation(): self._log_info( "COMMIT using DBAPI connection.commit(), " "DBAPI should ignore due to autocommit mode" diff --git a/test/engine/test_logging.py b/test/engine/test_logging.py index 806336368b1..7a0ed6e7934 100644 --- a/test/engine/test_logging.py +++ b/test/engine/test_logging.py @@ -640,6 +640,13 @@ def logging_engine(self, testing_engine): e.connect().close() return e + @testing.fixture() + def autocommit_iso_logging_engine(self, testing_engine): + kw = {"echo": True, "future": True, "isolation_level": "AUTOCOMMIT"} + e = testing_engine(options=kw) + e.connect().close() + return e + @testing.fixture() def plain_logging_engine(self, testing_engine): # deliver an engine with logging using the plain logging API, @@ -675,6 +682,38 @@ def test_commit_as_you_go_block_rollback(self, logging_engine, assert_buf): assert_buf(["BEGIN (implicit)", "ROLLBACK"]) + def test_commit_as_you_go_block_commit_engine_level_autocommit( + self, autocommit_iso_logging_engine, assert_buf + ): + with autocommit_iso_logging_engine.connect() as conn: + conn.begin() + conn.commit() + + assert_buf( + [ + "BEGIN (implicit; DBAPI should not " + "BEGIN due to autocommit mode)", + "COMMIT using DBAPI connection.commit(), DBAPI " + "should ignore due to autocommit mode", + ] + ) + + def test_commit_engine_level_autocommit_exec_opt_nonauto( + self, autocommit_iso_logging_engine, assert_buf + ): + with autocommit_iso_logging_engine.execution_options( + isolation_level=testing.db.dialect.default_isolation_level + ).connect() as conn: + conn.begin() + conn.commit() + + assert_buf( + [ + "BEGIN (implicit)", + "COMMIT", + ] + ) 
+ def test_commit_as_you_go_block_commit_autocommit( self, logging_engine, assert_buf ): @@ -686,7 +725,8 @@ def test_commit_as_you_go_block_commit_autocommit( assert_buf( [ - "BEGIN (implicit)", + "BEGIN (implicit; DBAPI should not " + "BEGIN due to autocommit mode)", "COMMIT using DBAPI connection.commit(), DBAPI " "should ignore due to autocommit mode", ] @@ -703,7 +743,8 @@ def test_commit_as_you_go_block_rollback_autocommit( assert_buf( [ - "BEGIN (implicit)", + "BEGIN (implicit; DBAPI should not " + "BEGIN due to autocommit mode)", "ROLLBACK using DBAPI connection.rollback(), DBAPI " "should ignore due to autocommit mode", ] From cdb67369e7c292c6639cf54da487e8482499f5fb Mon Sep 17 00:00:00 2001 From: aathan Date: Wed, 23 Feb 2022 17:06:42 -0500 Subject: [PATCH 170/632] Clarify associationproxy.rst examples by using a one word keyword The examples are based on a `keyword` attribute but use a value that a human may interpret as multiple keywords. Closes: #7582 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/7582 Pull-request-sha: 18b027035ece587c8bfbecff5a96385bf908edca Change-Id: I30e8c3c0f46c66058131dfadd2b70959913a6335 (cherry picked from commit 1d4d7d858c0f724ff684423746f986b20b972476) --- doc/build/orm/extensions/associationproxy.rst | 58 +++++++++++-------- 1 file changed, 35 insertions(+), 23 deletions(-) diff --git a/doc/build/orm/extensions/associationproxy.rst b/doc/build/orm/extensions/associationproxy.rst index de2001e6f58..1344bc84a83 100644 --- a/doc/build/orm/extensions/associationproxy.rst +++ b/doc/build/orm/extensions/associationproxy.rst @@ -15,6 +15,7 @@ the construction of sophisticated collections and dictionary views of virtually any geometry, persisted to the database using standard, transparently configured relational patterns. +.. 
_associationproxy_scalar_collections: Simplifying Scalar Collections ------------------------------ @@ -57,13 +58,13 @@ with ``User`` requires traversal from each collection element to the ``.keyword` attribute, which can be awkward:: >>> user = User('jek') - >>> user.kw.append(Keyword('cheese inspector')) + >>> user.kw.append(Keyword('cheese-inspector')) >>> print(user.kw) [<__main__.Keyword object at 0x12bf830>] >>> print(user.kw[0].keyword) - cheese inspector + cheese-inspector >>> print([keyword.keyword for keyword in user.kw]) - ['cheese inspector'] + ['cheese-inspector'] The ``association_proxy`` is applied to the ``User`` class to produce a "view" of the ``kw`` relationship, which only exposes the string @@ -88,9 +89,9 @@ which is both readable and writable. New ``Keyword`` objects are created for us transparently:: >>> user = User('jek') - >>> user.keywords.append('cheese inspector') + >>> user.keywords.append('cheese-inspector') >>> user.keywords - ['cheese inspector'] + ['cheese-inspector'] >>> user.keywords.append('snack ninja') >>> user.kw [<__main__.Keyword object at 0x12cdd30>, <__main__.Keyword object at 0x12cde30>] @@ -120,11 +121,11 @@ assignment event) is intercepted by the association proxy, it instantiates a new instance of the "intermediary" object using its constructor, passing as a single argument the given value. In our example above, an operation like:: - user.keywords.append('cheese inspector') + user.keywords.append('cheese-inspector') Is translated by the association proxy into the operation:: - user.kw.append(Keyword('cheese inspector')) + user.kw.append(Keyword('cheese-inspector')) The example works here because we have designed the constructor for ``Keyword`` to accept a single positional argument, ``keyword``. 
For those cases where a
@@ -214,9 +215,10 @@ collection of ``User`` to the ``.keyword`` attribute present on each
     def __repr__(self):
         return 'Keyword(%s)' % repr(self.keyword)
 
-With the above configuration, we can operate upon the ``.keywords``
-collection of each ``User`` object, and the usage of ``UserKeyword``
-is concealed::
+With the above configuration, we can operate upon the ``.keywords`` collection
+of each ``User`` object, each of which exposes a collection of ``Keyword``
+objects that are obtained from the underlying ``UserKeyword`` elements::
+
 
     >>> user = User('log')
     >>> for kw in (Keyword('new_from_blammo'), Keyword('its_big')):
@@ -225,22 +227,32 @@ is concealed::
     >>> print(user.keywords)
     [Keyword('new_from_blammo'), Keyword('its_big')]
 
-Where above, each ``.keywords.append()`` operation is equivalent to::
+This example is in contrast to the example illustrated previously at
+:ref:`associationproxy_scalar_collections`, where the association proxy exposed
+a collection of strings, rather than a collection of composed objects.
+In this case, each ``.keywords.append()`` operation is equivalent to::
 
     >>> user.user_keywords.append(UserKeyword(Keyword('its_heavy')))
 
-The ``UserKeyword`` association object has two attributes here which are populated;
-the ``.keyword`` attribute is populated directly as a result of passing
-the ``Keyword`` object as the first argument. The ``.user`` argument is then
-assigned as the ``UserKeyword`` object is appended to the ``User.user_keywords``
-collection, where the bidirectional relationship configured between ``User.user_keywords``
-and ``UserKeyword.user`` results in a population of the ``UserKeyword.user`` attribute.
-The ``special_key`` argument above is left at its default value of ``None``.
+The ``UserKeyword`` association object has two attributes that are both
+populated within the scope of the ``append()`` operation of the association
+proxy; ``.keyword``, which refers to the
+``Keyword`` object, and ``.user``, which refers to the ``User``.
+The ``.keyword`` attribute is populated first, as the association proxy
+generates a new ``UserKeyword`` object in response to the ``.append()``
+operation, assigning the given ``Keyword`` instance to the ``.keyword``
+attribute. Then, as the ``UserKeyword`` object is appended to the
+``User.user_keywords`` collection, the ``UserKeyword.user`` attribute,
+configured as ``back_populates`` for ``User.user_keywords``, is initialized
+upon the given ``UserKeyword`` instance to refer to the parent ``User``
+receiving the append operation. The ``special_key``
+argument above is left at its default value of ``None``.
 
 For those cases where we do want ``special_key`` to have a value, we
-create the ``UserKeyword`` object explicitly. Below we assign all three
-attributes, where the assignment of ``.user`` has the effect of the ``UserKeyword``
-being appended to the ``User.user_keywords`` collection::
+create the ``UserKeyword`` object explicitly. 
Below we assign all +three attributes, wherein the assignment of ``.user`` during +construction, has the effect of appending the new ``UserKeyword`` to +the ``User.user_keywords`` collection (via the relationship):: >>> UserKeyword(Keyword('its_wood'), user, special_key='my special key') @@ -275,7 +287,7 @@ when new elements are added to the dictionary:: from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.ext.associationproxy import association_proxy - from sqlalchemy.orm import backref, declarative_base, relationship + from sqlalchemy.orm import backref, declarative_base, relationship from sqlalchemy.orm.collections import attribute_mapped_collection Base = declarative_base() @@ -350,7 +362,7 @@ present on ``UserKeyword``:: from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.ext.associationproxy import association_proxy - from sqlalchemy.orm import backref, declarative_base, relationship + from sqlalchemy.orm import backref, declarative_base, relationship from sqlalchemy.orm.collections import attribute_mapped_collection Base = declarative_base() From 1a196d5b7d47ba1d207661e05605bc79badc9a7d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 26 Mar 2022 10:09:10 -0400 Subject: [PATCH 171/632] support BLANK_SCHEMA, RETAIN_SCHEMA FK schema on copy Added support so that the :paramref:`.Table.tometadata.referred_schema_fn` callable passed to :meth:`.Table.to_metadata` may return the value :data:`.BLANK_SCHEMA` to indicate that the referenced foreign key should be reset to None. The :data.`RETAIN_SCHEMA` symbol may also be returned from this function to indicate "no change", which will behave the same as ``None`` currently does which also indicates no change. 
Fixes: #7860

Change-Id: I82a45988d534295d8356453f68001b21d4ff706d
(cherry picked from commit 74d3f3965b383aef7421f2cf1779573a4ac65987)
---
 doc/build/changelog/unreleased_14/7860.rst | 11 +++++++++++
 lib/sqlalchemy/sql/schema.py               | 19 +++++++++++++++----
 test/sql/test_metadata.py                  | 22 ++++++++++++++++++++++
 3 files changed, 48 insertions(+), 4 deletions(-)
 create mode 100644 doc/build/changelog/unreleased_14/7860.rst

diff --git a/doc/build/changelog/unreleased_14/7860.rst b/doc/build/changelog/unreleased_14/7860.rst
new file mode 100644
index 00000000000..cb6bcc59fce
--- /dev/null
+++ b/doc/build/changelog/unreleased_14/7860.rst
@@ -0,0 +1,11 @@
+.. change::
+    :tags: usecase, schema
+    :tickets: 7860
+
+    Added support so that the :paramref:`.Table.tometadata.referred_schema_fn`
+    callable passed to :meth:`.Table.to_metadata` may return the value
+    :data:`.BLANK_SCHEMA` to indicate that the referenced foreign key should be
+    reset to None. The :data:`.RETAIN_SCHEMA` symbol may also be returned from
+    this function to indicate "no change", which will behave the same as
+    ``None`` currently does which also indicates no change.
+
diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py
index 6240357f54e..7c9fef644cf 100644
--- a/lib/sqlalchemy/sql/schema.py
+++ b/lib/sqlalchemy/sql/schema.py
@@ -62,7 +62,8 @@
 BLANK_SCHEMA = util.symbol(
     "blank_schema",
-    """Symbol indicating that a :class:`_schema.Table` or :class:`.Sequence`
+    """Symbol indicating that a :class:`_schema.Table`, :class:`.Sequence`
+    or in some cases a :class:`_schema.ForeignKey` object
     should have 'None' for its schema, even if the parent
     :class:`_schema.MetaData` has specified a schema.
 
@@ -1047,7 +1048,14 @@ def to_metadata(
           target schema that we are changing to, the
           :class:`_schema.ForeignKeyConstraint` object, and the existing
           "target schema" of that constraint.  The function should return the
To reset the schema + to "none", return the symbol :data:`.BLANK_SCHEMA`. To effect no + change, return ``None`` or :data:`.RETAIN_SCHEMA`. + + .. versionchanged:: 1.4.33 The ``referred_schema_fn`` function + may return the :data:`.BLANK_SCHEMA` or :data:`.RETAIN_SCHEMA` + symbols. + E.g.:: def referred_schema_fn(table, to_schema, @@ -2292,11 +2300,14 @@ def _get_colspec(self, schema=None, table_name=None): argument first passed to the object's constructor. """ - if schema: + if schema not in (None, RETAIN_SCHEMA): _schema, tname, colname = self._column_tokens if table_name is not None: tname = table_name - return "%s.%s.%s" % (schema, tname, colname) + if schema is BLANK_SCHEMA: + return "%s.%s" % (tname, colname) + else: + return "%s.%s.%s" % (schema, tname, colname) elif table_name: schema, tname, colname = self._column_tokens if schema: diff --git a/test/sql/test_metadata.py b/test/sql/test_metadata.py index e193c5ec7af..7205c882333 100644 --- a/test/sql/test_metadata.py +++ b/test/sql/test_metadata.py @@ -41,6 +41,7 @@ from sqlalchemy.sql import operators from sqlalchemy.sql.elements import _NONE_NAME from sqlalchemy.sql.elements import literal_column +from sqlalchemy.sql.schema import RETAIN_SCHEMA from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import AssertsCompiledSQL @@ -1274,6 +1275,27 @@ def ref_fn(table, to_schema, constraint, referred_schema): self._assert_fk(t2, "z", "h.t1.x", referred_schema_fn=ref_fn) + def test_fk_reset_to_none(self): + m = MetaData() + + t2 = Table("t2", m, Column("y", Integer, ForeignKey("p.t1.x"))) + + def ref_fn(table, to_schema, constraint, referred_schema): + return BLANK_SCHEMA + + self._assert_fk(t2, None, "t1.x", referred_schema_fn=ref_fn) + + @testing.combinations(None, RETAIN_SCHEMA) + def test_fk_test_non_return_for_referred_schema(self, sym): + m = MetaData() + + t2 = Table("t2", m, Column("y", Integer, ForeignKey("p.t1.x"))) + + def 
ref_fn(table, to_schema, constraint, referred_schema): + return sym + + self._assert_fk(t2, None, "p.t1.x", referred_schema_fn=ref_fn) + def test_copy_info(self): m = MetaData() fk = ForeignKey("t2.id") From 92fc2cebaa3d18532f9d39c3886f7286673c0786 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 28 Mar 2022 11:23:39 -0400 Subject: [PATCH 172/632] block asyncmy 0.2.4 asyncmy made a backwards incompatible change to the TIME datatype in 0.2.4 which hopefully will be reverted as this change does not belong in a minor point release Change-Id: Id8bf932d70f52caba06094308a5b87387e69b94b References: https://github.com/long2ice/asyncmy/issues/37 (cherry picked from commit bbd6420b39edb24481f171b8d0081149dd2e5ba7) --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 807ee917059..85c4796aee8 100644 --- a/setup.cfg +++ b/setup.cfg @@ -77,7 +77,7 @@ aiomysql = aiomysql;python_version>="3" asyncmy = %(asyncio)s - asyncmy>=0.2.3;python_version>="3" + asyncmy>=0.2.3,!=0.2.4;python_version>="3" aiosqlite = %(asyncio)s aiosqlite;python_version>="3" From 32cac8f9ff223acfaea4f33798ebcb658382d145 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 28 Mar 2022 16:07:51 -0400 Subject: [PATCH 173/632] pin click pre-8.1 for black targets both packages seem to have a bunch of updates, but as this branch is still formatted for python 2, keep black at its old version and keep click before 8.1 which seems to be where symbols that are used by the older black versions were removed Change-Id: I432a59fb7512cf7c453815e17ec5e0559e5d6be7 --- .pre-commit-config.yaml | 2 ++ tox.ini | 1 + 2 files changed, 3 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f8512708ee8..2be64772dd0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,6 +5,8 @@ repos: rev: 21.5b1 hooks: - id: black + additional_dependencies: + - click < 8.1 - repo: https://github.com/sqlalchemyorg/zimports rev: v0.5.0 diff 
--git a/tox.ini b/tox.ini index 505af98e80e..2000351716b 100644 --- a/tox.ini +++ b/tox.ini @@ -150,6 +150,7 @@ deps= pydocstyle pygments black==21.5b1 + click<8.1 commands = flake8 ./lib/ ./test/ ./examples/ setup.py doc/build/conf.py {posargs} black --check ./lib/ ./test/ ./examples/ setup.py doc/build/conf.py From 256efbb2b8ffa69e3e08bc987161a8e1b9649dc9 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 28 Mar 2022 13:46:24 -0400 Subject: [PATCH 174/632] fix quotes regexp for SQLite CHECK constraints Fixed bug where the name of CHECK constraints under SQLite would not be reflected if the name were created using quotes, as is the case when the name uses mixed case or special characters. Fixes: #5463 Change-Id: Ic3b1e0a0385fb9e727b0880e90815ea2814df313 (cherry picked from commit cb52b934000047278dbb63d0cfffdb4eae1f669c) --- doc/build/changelog/unreleased_14/5463.rst | 8 ++++++++ lib/sqlalchemy/dialects/sqlite/base.py | 12 ++++++++---- lib/sqlalchemy/testing/suite/test_reflection.py | 6 ++++-- 3 files changed, 20 insertions(+), 6 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/5463.rst diff --git a/doc/build/changelog/unreleased_14/5463.rst b/doc/build/changelog/unreleased_14/5463.rst new file mode 100644 index 00000000000..5de6182acf5 --- /dev/null +++ b/doc/build/changelog/unreleased_14/5463.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, sqlite, reflection + :tickets: 5463 + + Fixed bug where the name of CHECK constraints under SQLite would not be + reflected if the name were created using quotes, as is the case when the + name uses mixed case or special characters. 
+ diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index 7ba9700d709..49e4b5c1955 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -2449,17 +2449,21 @@ def get_check_constraints(self, connection, table_name, schema=None, **kw): if not table_data: return [] - CHECK_PATTERN = r"(?:CONSTRAINT (\w+) +)?" r"CHECK *\( *(.+) *\),? *" + CHECK_PATTERN = r"(?:CONSTRAINT (.+) +)?" r"CHECK *\( *(.+) *\),? *" check_constraints = [] # NOTE: we aren't using re.S here because we actually are # taking advantage of each CHECK constraint being all on one # line in the table definition in order to delineate. This # necessarily makes assumptions as to how the CREATE TABLE # was emitted. + for match in re.finditer(CHECK_PATTERN, table_data, re.I): - check_constraints.append( - {"sqltext": match.group(2), "name": match.group(1)} - ) + name = match.group(1) + + if name: + name = re.sub(r'^"|"$', "", name) + + check_constraints.append({"sqltext": match.group(2), "name": name}) return check_constraints diff --git a/lib/sqlalchemy/testing/suite/test_reflection.py b/lib/sqlalchemy/testing/suite/test_reflection.py index a1f2e3bc94b..459a4d8211c 100644 --- a/lib/sqlalchemy/testing/suite/test_reflection.py +++ b/lib/sqlalchemy/testing/suite/test_reflection.py @@ -1152,7 +1152,9 @@ def test_get_check_constraints(self, metadata, connection, use_schema): metadata, Column("a", Integer()), sa.CheckConstraint("a > 1 AND a < 5", name="cc1"), - sa.CheckConstraint("a = 1 OR (a > 2 AND a < 5)", name="cc2"), + sa.CheckConstraint( + "a = 1 OR (a > 2 AND a < 5)", name="UsesCasing" + ), schema=schema, ) @@ -1179,8 +1181,8 @@ def normalize(sqltext): eq_( reflected, [ + {"name": "UsesCasing", "sqltext": "a = 1 or a > 2 and a < 5"}, {"name": "cc1", "sqltext": "a > 1 and a < 5"}, - {"name": "cc2", "sqltext": "a = 1 or a > 2 and a < 5"}, ], ) From f4654ed33375e50254bd4f17f94c38be8df5d02a Mon Sep 17 00:00:00 2001 From: 
Mike Bayer Date: Sat, 26 Mar 2022 16:20:34 -0400 Subject: [PATCH 175/632] column_descriptions or equiv for DML, core select Added new attributes :attr:`.ValuesBase.returning_column_descriptions` and :attr:`.ValuesBase.entity_description` to allow for inspection of ORM attributes and entities that are installed as part of an :class:`.Insert`, :class:`.Update`, or :class:`.Delete` construct. The :attr:`.Select.column_descriptions` accessor is also now implemented for Core-only selectables. Fixes: #7861 Change-Id: Ia6a1cd24c798ba61f4e8e8eac90a0fd00d738342 (cherry picked from commit 2f1df5f9105149d6cb01c8b6ab6b9ccffa020780) --- doc/build/changelog/unreleased_14/7861.rst | 10 ++ doc/build/glossary.rst | 3 +- doc/build/orm/queryguide.rst | 99 ++++++++++++++++ lib/sqlalchemy/orm/persistence.py | 56 ++++++++- lib/sqlalchemy/orm/query.py | 9 ++ lib/sqlalchemy/sql/dml.py | 98 +++++++++++++++ lib/sqlalchemy/sql/selectable.py | 50 +++++++- test/orm/test_core_compilation.py | 104 +++++++++++++++- test/sql/test_selectable.py | 132 +++++++++++++++++++++ 9 files changed, 552 insertions(+), 9 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7861.rst diff --git a/doc/build/changelog/unreleased_14/7861.rst b/doc/build/changelog/unreleased_14/7861.rst new file mode 100644 index 00000000000..49ac82ad84a --- /dev/null +++ b/doc/build/changelog/unreleased_14/7861.rst @@ -0,0 +1,10 @@ +.. change:: + :tags: usecase, orm + :tickets: 7861 + + Added new attributes :attr:`.UpdateBase.returning_column_descriptions` and + :attr:`.UpdateBase.entity_description` to allow for inspection of ORM + attributes and entities that are installed as part of an :class:`.Insert`, + :class:`.Update`, or :class:`.Delete` construct. The + :attr:`.Select.column_descriptions` accessor is also now implemented for + Core-only selectables. 
diff --git a/doc/build/glossary.rst b/doc/build/glossary.rst index 2a1e17d3101..2eb7912497f 100644 --- a/doc/build/glossary.rst +++ b/doc/build/glossary.rst @@ -179,8 +179,9 @@ Glossary within the join expression. plugin + plugin-enabled plugin-specific - "plugin-specific" generally indicates a function or method in + "plugin-enabled" or "plugin-specific" generally indicates a function or method in SQLAlchemy Core which will behave differently when used in an ORM context. diff --git a/doc/build/orm/queryguide.rst b/doc/build/orm/queryguide.rst index 91c0b311980..f6d6ce711c0 100644 --- a/doc/build/orm/queryguide.rst +++ b/doc/build/orm/queryguide.rst @@ -1082,3 +1082,102 @@ matching objects locally present in the :class:`_orm.Session`. See the section >>> conn.close() ROLLBACK + +.. _queryguide_inspection: + +Inspecting entities and columns from ORM-enabled SELECT and DML statements +========================================================================== + +The :func:`.select` construct, as well as the :func:`.insert`, :func:`.update` +and :func:`.delete` constructs (for the latter DML constructs, as of SQLAlchemy +1.4.33), all support the ability to inspect the entities in which these +statements are created against, as well as the columns and datatypes that would +be returned in a result set. + +For a :class:`.Select` object, this information is available from the +:attr:`.Select.column_descriptions` attribute. This attribute operates in the +same way as the legacy :attr:`.Query.column_descriptions` attribute. 
The format +returned is a list of dictionaries:: + + >>> from pprint import pprint + >>> user_alias = aliased(User, name='user2') + >>> stmt = select(User, User.id, user_alias) + >>> pprint(stmt.column_descriptions) + [{'aliased': False, + 'entity': , + 'expr': , + 'name': 'User', + 'type': }, + {'aliased': False, + 'entity': , + 'expr': <....InstrumentedAttribute object at ...>, + 'name': 'id', + 'type': Integer()}, + {'aliased': True, + 'entity': , + 'expr': , + 'name': 'user2', + 'type': }] + + +When :attr:`.Select.column_descriptions` is used with non-ORM objects +such as plain :class:`.Table` or :class:`.Column` objects, the entries +will contain basic information about individual columns returned in all +cases:: + + >>> stmt = select(user_table, address_table.c.id) + >>> pprint(stmt.column_descriptions) + [{'expr': Column('id', Integer(), table=, primary_key=True, nullable=False), + 'name': 'id', + 'type': Integer()}, + {'expr': Column('name', String(length=30), table=), + 'name': 'name', + 'type': String(length=30)}, + {'expr': Column('fullname', String(), table=), + 'name': 'fullname', + 'type': String()}, + {'expr': Column('id', Integer(), table=
, primary_key=True, nullable=False), + 'name': 'id_1', + 'type': Integer()}] + +.. versionchanged:: 1.4.33 The :attr:`.Select.column_descriptions` attribute now returns + a value when used against a :class:`.Select` that is not ORM-enabled. Previously, + this would raise ``NotImplementedError``. + + +For :func:`.insert`, :func:`.update` and :func:`.delete` constructs, there are +two separate attributes. One is :attr:`.UpdateBase.entity_description` which +returns information about the primary ORM entity and database table which the +DML construct would be affecting:: + + >>> from sqlalchemy import update + >>> stmt = update(User).values(name="somename").returning(User.id) + >>> pprint(stmt.entity_description) + {'entity': , + 'expr': , + 'name': 'User', + 'table': Table('user_account', ...), + 'type': } + +.. tip:: The :attr:`.UpdateBase.entity_description` includes an entry + ``"table"`` which is actually the **table to be inserted, updated or + deleted** by the statement, which is **not** always the same as the SQL + "selectable" to which the class may be mapped. For example, in a + joined-table inheritance scenario, ``"table"`` will refer to the local table + for the given entity. + +The other is :attr:`.UpdateBase.returning_column_descriptions` which +delivers information about the columns present in the RETURNING collection +in a manner roughly similar to that of :attr:`.Select.column_descriptions`:: + + >>> pprint(stmt.returning_column_descriptions) + [{'aliased': False, + 'entity': , + 'expr': , + 'name': 'id', + 'type': Integer()}] + +.. versionadded:: 1.4.33 Added the :attr:`.UpdateBase.entity_description` + and :attr:`.UpdateBase.returning_column_descriptions` attributes. 
+ + diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index f7b665becad..c3d2cfd49c9 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ b/lib/sqlalchemy/orm/persistence.py @@ -40,6 +40,7 @@ from ..sql.base import CompileState from ..sql.base import Options from ..sql.dml import DeleteDMLState +from ..sql.dml import InsertDMLState from ..sql.dml import UpdateDMLState from ..sql.elements import BooleanClauseList from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL @@ -2137,8 +2138,59 @@ def skip_for_full_returning(orm_context): } +class ORMDMLState: + @classmethod + def get_entity_description(cls, statement): + ext_info = statement.table._annotations["parententity"] + mapper = ext_info.mapper + if ext_info.is_aliased_class: + _label_name = ext_info.name + else: + _label_name = mapper.class_.__name__ + + return { + "name": _label_name, + "type": mapper.class_, + "expr": ext_info.entity, + "entity": ext_info.entity, + "table": mapper.local_table, + } + + @classmethod + def get_returning_column_descriptions(cls, statement): + def _ent_for_col(c): + return c._annotations.get("parententity", None) + + def _attr_for_col(c, ent): + if ent is None: + return c + proxy_key = c._annotations.get("proxy_key", None) + if not proxy_key: + return c + else: + return getattr(ent.entity, proxy_key, c) + + return [ + { + "name": c.key, + "type": c.type, + "expr": _attr_for_col(c, ent), + "aliased": ent.is_aliased_class, + "entity": ent.entity, + } + for c, ent in [ + (c, _ent_for_col(c)) for c in statement._all_selected_columns + ] + ] + + +@CompileState.plugin_for("orm", "insert") +class ORMInsert(ORMDMLState, InsertDMLState): + pass + + @CompileState.plugin_for("orm", "update") -class BulkORMUpdate(UpdateDMLState, BulkUDCompileState): +class BulkORMUpdate(ORMDMLState, UpdateDMLState, BulkUDCompileState): @classmethod def create_for_statement(cls, statement, compiler, **kw): @@ -2356,7 +2408,7 @@ def _do_post_synchronize_fetch(cls, session, 
result, update_options): @CompileState.plugin_for("orm", "delete") -class BulkORMDelete(DeleteDMLState, BulkUDCompileState): +class BulkORMDelete(ORMDMLState, DeleteDMLState, BulkUDCompileState): @classmethod def create_for_statement(cls, statement, compiler, **kw): self = cls.__new__(cls) diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index 9378bc3a2a6..88910ba0624 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -2973,6 +2973,15 @@ def column_descriptions(self): } ] + .. seealso:: + + This API is available using :term:`2.0 style` queries as well, + documented at: + + * :ref:`queryguide_inspection` + + * :attr:`.Select.column_descriptions` + """ return _column_descriptions(self, legacy=True) diff --git a/lib/sqlalchemy/sql/dml.py b/lib/sqlalchemy/sql/dml.py index be5714451ba..943bb070546 100644 --- a/lib/sqlalchemy/sql/dml.py +++ b/lib/sqlalchemy/sql/dml.py @@ -48,6 +48,21 @@ class DMLState(CompileState): def __init__(self, statement, compiler, **kw): raise NotImplementedError() + @classmethod + def get_entity_description(cls, statement): + return {"name": statement.table.name, "table": statement.table} + + @classmethod + def get_returning_column_descriptions(cls, statement): + return [ + { + "name": c.key, + "type": c.type, + "expr": c, + } + for c in statement._all_selected_columns + ] + @property def dml_table(self): return self.statement.table @@ -486,6 +501,89 @@ def with_hint(self, text, selectable=None, dialect_name="*"): self._hints = self._hints.union({(selectable, dialect_name): text}) + @property + def entity_description(self): + """Return a :term:`plugin-enabled` description of the table and/or entity + which this DML construct is operating against. + + This attribute is generally useful when using the ORM, as an + extended structure which includes information about mapped + entities is returned. The section :ref:`queryguide_inspection` + contains more background. 
+ + For a Core statement, the structure returned by this accessor + is derived from the :attr:`.UpdateBase.table` attribute, and + refers to the :class:`.Table` being inserted, updated, or deleted:: + + >>> stmt = insert(user_table) + >>> stmt.entity_description + { + "name": "user_table", + "table": Table("user_table", ...) + } + + .. versionadded:: 1.4.33 + + .. seealso:: + + :attr:`.UpdateBase.returning_column_descriptions` + + :attr:`.Select.column_descriptions` - entity information for + a :func:`.select` construct + + :ref:`queryguide_inspection` - ORM background + + """ + meth = DMLState.get_plugin_class(self).get_entity_description + return meth(self) + + @property + def returning_column_descriptions(self): + """Return a :term:`plugin-enabled` description of the columns + which this DML construct is RETURNING against, in other words + the expressions established as part of :meth:`.UpdateBase.returning`. + + This attribute is generally useful when using the ORM, as an + extended structure which includes information about mapped + entities is returned. The section :ref:`queryguide_inspection` + contains more background. + + For a Core statement, the structure returned by this accessor is + derived from the same objects that are returned by the + :attr:`.UpdateBase.exported_columns` accessor:: + + >>> stmt = insert(user_table).returning(user_table.c.id, user_table.c.name) + >>> stmt.entity_description + [ + { + "name": "id", + "type": Integer, + "expr": Column("id", Integer(), table=, ...) + }, + { + "name": "name", + "type": String(), + "expr": Column("name", String(), table=, ...) + }, + ] + + .. versionadded:: 1.4.33 + + .. 
seealso:: + + :attr:`.UpdateBase.entity_description` + + :attr:`.Select.column_descriptions` - entity information for + a :func:`.select` construct + + :ref:`queryguide_inspection` - ORM background + + """ # noqa E501 + meth = DMLState.get_plugin_class( + self + ).get_returning_column_descriptions + return meth(self) + class ValuesBase(UpdateBase): """Supplies support for :meth:`.ValuesBase.values` to diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index 125c3724b82..028ed99a60c 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -4388,7 +4388,16 @@ def _plugin_not_implemented(cls): @classmethod def get_column_descriptions(cls, statement): - cls._plugin_not_implemented() + return [ + { + "name": name, + "type": element.type, + "expr": element, + } + for _, name, _, element, _ in ( + statement._generate_columns_plus_names(False) + ) + ] @classmethod def from_statement(cls, statement, from_statement): @@ -5331,8 +5340,43 @@ def filter_by(self, **kwargs): @property def column_descriptions(self): - """Return a 'column descriptions' structure which may be - :term:`plugin-specific`. + """Return a :term:`plugin-enabled` 'column descriptions' structure + referring to the columns which are SELECTed by this statement. + + This attribute is generally useful when using the ORM, as an + extended structure which includes information about mapped + entities is returned. The section :ref:`queryguide_inspection` + contains more background. 
+ + For a Core-only statement, the structure returned by this accessor + is derived from the same objects that are returned by the + :attr:`.Select.selected_columns` accessor, formatted as a list of + dictionaries which contain the keys ``name``, ``type`` and ``expr``, + which indicate the column expressions to be selected:: + + >>> stmt = select(user_table) + >>> stmt.column_descriptions + [ + { + 'name': 'id', + 'type': Integer(), + 'expr': Column('id', Integer(), ...)}, + { + 'name': 'name', + 'type': String(length=30), + 'expr': Column('name', String(length=30), ...)} + ] + + .. versionchanged:: 1.4.33 The :attr:`.Select.column_descriptions` + attribute returns a structure for a Core-only set of entities, + not just ORM-only entities. + + .. seealso:: + + :attr:`.UpdateBase.entity_description` - entity information for + an :func:`.insert`, :func:`.update`, or :func:`.delete` + + :ref:`queryguide_inspection` - ORM background """ meth = SelectState.get_plugin_class(self).get_column_descriptions diff --git a/test/orm/test_core_compilation.py b/test/orm/test_core_compilation.py index 7948c016ba2..0ebc9f6504b 100644 --- a/test/orm/test_core_compilation.py +++ b/test/orm/test_core_compilation.py @@ -1,5 +1,6 @@ from sqlalchemy import bindparam from sqlalchemy import Column +from sqlalchemy import delete from sqlalchemy import exc from sqlalchemy import func from sqlalchemy import insert @@ -12,6 +13,7 @@ from sqlalchemy import testing from sqlalchemy import text from sqlalchemy import union +from sqlalchemy import update from sqlalchemy import util from sqlalchemy.orm import aliased from sqlalchemy.orm import column_property @@ -112,6 +114,18 @@ def test_froms_join(self): } ], ), + ( + lambda user_alias: (user_alias,), + lambda User, user_alias: [ + { + "name": None, + "type": User, + "aliased": True, + "expr": user_alias, + "entity": user_alias, + } + ], + ), ( lambda User: (User.id,), lambda User: [ @@ -162,17 +176,101 @@ def test_froms_join(self): }, ], ), + ( + 
lambda user_table: (user_table,), + lambda user_table: [ + { + "name": "id", + "type": testing.eq_type_affinity(sqltypes.Integer), + "expr": user_table.c.id, + }, + { + "name": "name", + "type": testing.eq_type_affinity(sqltypes.String), + "expr": user_table.c.name, + }, + ], + ), ) def test_column_descriptions(self, cols, expected): User, Address = self.classes("User", "Address") + ua = aliased(User) - cols = testing.resolve_lambda(cols, User=User, Address=Address) - expected = testing.resolve_lambda(expected, User=User, Address=Address) + cols = testing.resolve_lambda( + cols, + User=User, + Address=Address, + user_alias=ua, + user_table=inspect(User).local_table, + ) + expected = testing.resolve_lambda( + expected, + User=User, + Address=Address, + user_alias=ua, + user_table=inspect(User).local_table, + ) stmt = select(*cols) - eq_(stmt.column_descriptions, expected) + @testing.combinations(insert, update, delete, argnames="dml_construct") + @testing.combinations( + ( + lambda User: User, + lambda User: (User.id, User.name), + lambda User, user_table: { + "name": "User", + "type": User, + "expr": User, + "entity": User, + "table": user_table, + }, + lambda User: [ + { + "name": "id", + "type": testing.eq_type_affinity(sqltypes.Integer), + "aliased": False, + "expr": User.id, + "entity": User, + }, + { + "name": "name", + "type": testing.eq_type_affinity(sqltypes.String), + "aliased": False, + "expr": User.name, + "entity": User, + }, + ], + ), + argnames="entity, cols, expected_entity, expected_returning", + ) + def test_dml_descriptions( + self, dml_construct, entity, cols, expected_entity, expected_returning + ): + User, Address = self.classes("User", "Address") + + lambda_args = dict( + User=User, + Address=Address, + user_table=inspect(User).local_table, + ) + entity = testing.resolve_lambda(entity, **lambda_args) + cols = testing.resolve_lambda(cols, **lambda_args) + expected_entity = testing.resolve_lambda( + expected_entity, **lambda_args + ) + 
expected_returning = testing.resolve_lambda( + expected_returning, **lambda_args + ) + + stmt = dml_construct(entity) + if cols: + stmt = stmt.returning(*cols) + + eq_(stmt.entity_description, expected_entity) + eq_(stmt.returning_column_descriptions, expected_returning) + class ColumnsClauseFromsTest(QueryTest, AssertsCompiledSQL): __dialect__ = "default" diff --git a/test/sql/test_selectable.py b/test/sql/test_selectable.py index f0df92b7051..eb577aa0235 100644 --- a/test/sql/test_selectable.py +++ b/test/sql/test_selectable.py @@ -4,11 +4,13 @@ from sqlalchemy import Boolean from sqlalchemy import cast from sqlalchemy import Column +from sqlalchemy import delete from sqlalchemy import exc from sqlalchemy import exists from sqlalchemy import false from sqlalchemy import ForeignKey from sqlalchemy import func +from sqlalchemy import insert from sqlalchemy import Integer from sqlalchemy import join from sqlalchemy import literal_column @@ -27,6 +29,7 @@ from sqlalchemy import type_coerce from sqlalchemy import TypeDecorator from sqlalchemy import union +from sqlalchemy import update from sqlalchemy import util from sqlalchemy.sql import Alias from sqlalchemy.sql import annotation @@ -86,6 +89,135 @@ class SelectableTest( ): __dialect__ = "default" + @testing.combinations( + ( + (table1.c.col1, table1.c.col2), + [ + { + "name": "col1", + "type": table1.c.col1.type, + "expr": table1.c.col1, + }, + { + "name": "col2", + "type": table1.c.col2.type, + "expr": table1.c.col2, + }, + ], + ), + ( + (table1,), + [ + { + "name": "col1", + "type": table1.c.col1.type, + "expr": table1.c.col1, + }, + { + "name": "col2", + "type": table1.c.col2.type, + "expr": table1.c.col2, + }, + { + "name": "col3", + "type": table1.c.col3.type, + "expr": table1.c.col3, + }, + { + "name": "colx", + "type": table1.c.colx.type, + "expr": table1.c.colx, + }, + ], + ), + ( + (func.count(table1.c.col1),), + [ + { + "name": "count", + "type": testing.eq_type_affinity(Integer), + "expr": 
testing.eq_clause_element( + func.count(table1.c.col1) + ), + } + ], + ), + ( + (func.count(table1.c.col1), func.count(table1.c.col2)), + [ + { + "name": "count", + "type": testing.eq_type_affinity(Integer), + "expr": testing.eq_clause_element( + func.count(table1.c.col1) + ), + }, + { + "name": "count_1", + "type": testing.eq_type_affinity(Integer), + "expr": testing.eq_clause_element( + func.count(table1.c.col2) + ), + }, + ], + ), + ) + def test_core_column_descriptions(self, cols, expected): + stmt = select(*cols) + # reverse eq_ is so eq_clause_element works + eq_(expected, stmt.column_descriptions) + + @testing.combinations(insert, update, delete, argnames="dml_construct") + @testing.combinations( + ( + table1, + (table1.c.col1, table1.c.col2), + {"name": "table1", "table": table1}, + [ + { + "name": "col1", + "type": table1.c.col1.type, + "expr": table1.c.col1, + }, + { + "name": "col2", + "type": table1.c.col2.type, + "expr": table1.c.col2, + }, + ], + ), + ( + table1, + (func.count(table1.c.col1),), + {"name": "table1", "table": table1}, + [ + { + "name": None, + "type": testing.eq_type_affinity(Integer), + "expr": testing.eq_clause_element( + func.count(table1.c.col1) + ), + }, + ], + ), + ( + table1, + None, + {"name": "table1", "table": table1}, + [], + ), + argnames="entity, cols, expected_entity, expected_returning", + ) + def test_dml_descriptions( + self, dml_construct, entity, cols, expected_entity, expected_returning + ): + stmt = dml_construct(entity) + if cols: + stmt = stmt.returning(*cols) + + eq_(stmt.entity_description, expected_entity) + eq_(expected_returning, stmt.returning_column_descriptions) + def test_indirect_correspondence_on_labels(self): # this test depends upon 'distance' to # get the right result From c8dd7affff3c253256be81b5e912bcdd1359cf1a Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 28 Mar 2022 18:39:19 -0400 Subject: [PATCH 176/632] apply loader criteria more specifically when refresh is true Fixed bug in 
:func:`_orm.with_loader_criteria` function where loader criteria would not be applied to a joined eager load that were invoked within the scope of a refresh operation for the parent object. Fixes: #7862 Change-Id: If1ac86eaa95880b5ec5bdeee292d6e8000aac705 (cherry picked from commit 9c52d9a507a738ae68f0a6eae09d87959995b981) --- doc/build/changelog/unreleased_14/7862.rst | 7 +++ lib/sqlalchemy/orm/context.py | 5 +- lib/sqlalchemy/orm/util.py | 3 +- test/orm/test_relationship_criteria.py | 61 ++++++++++++++++++++++ 4 files changed, 73 insertions(+), 3 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7862.rst diff --git a/doc/build/changelog/unreleased_14/7862.rst b/doc/build/changelog/unreleased_14/7862.rst new file mode 100644 index 00000000000..00252ec8dd3 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7862.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: bug, orm + :tickets: 7862 + + Fixed bug in :func:`_orm.with_loader_criteria` function where loader + criteria would not be applied to a joined eager load that were invoked + within the scope of a refresh operation for the parent object. 
diff --git a/lib/sqlalchemy/orm/context.py b/lib/sqlalchemy/orm/context.py index 7a63543a650..49d354cb3d2 100644 --- a/lib/sqlalchemy/orm/context.py +++ b/lib/sqlalchemy/orm/context.py @@ -2254,7 +2254,10 @@ def _adjust_for_extra_criteria(self): single_crit = ext_info.mapper._single_table_criterion - additional_entity_criteria = self._get_extra_criteria(ext_info) + if self.compile_options._for_refresh_state: + additional_entity_criteria = [] + else: + additional_entity_criteria = self._get_extra_criteria(ext_info) if single_crit is not None: additional_entity_criteria += (single_crit,) diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index 0cd6b8f41c6..9ec2ad0768f 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -1202,8 +1202,7 @@ def process_compile_state(self, compile_state): "Please migrate code to use the with_polymorphic() standalone " "function before using with_loader_criteria()." ) - if not compile_state.compile_options._for_refresh_state: - self.get_global_criteria(compile_state.global_attributes) + self.get_global_criteria(compile_state.global_attributes) def get_global_criteria(self, attributes): for mp in self._all_mappers(): diff --git a/test/orm/test_relationship_criteria.py b/test/orm/test_relationship_criteria.py index 932f80d9f59..5f47b49ac7a 100644 --- a/test/orm/test_relationship_criteria.py +++ b/test/orm/test_relationship_criteria.py @@ -55,6 +55,33 @@ def user_address_fixture(self): ) return User, Address + @testing.fixture + def user_address_custom_strat_fixture(self): + users, Address, addresses, User = ( + self.tables.users, + self.classes.Address, + self.tables.addresses, + self.classes.User, + ) + + def go(strat): + self.mapper_registry.map_imperatively( + User, + users, + properties={ + "addresses": relationship( + self.mapper_registry.map_imperatively( + Address, addresses + ), + lazy=strat, + order_by=Address.id, + ) + }, + ) + return User, Address + + return go + @testing.fixture def 
order_item_fixture(self): Order, Item = self.classes("Order", "Item") @@ -220,6 +247,40 @@ def test_criteria_post_replace(self, user_address_fixture): "WHERE users.name != :name_1", ) + @testing.combinations( + "select", + "joined", + "subquery", + "selectin", + "immediate", + argnames="loader_strategy", + ) + def test_loader_strategy_on_refresh( + self, loader_strategy, user_address_custom_strat_fixture + ): + User, Address = user_address_custom_strat_fixture(loader_strategy) + + sess = fixture_session() + + @event.listens_for(sess, "do_orm_execute") + def add_criteria(orm_context): + orm_context.statement = orm_context.statement.options( + with_loader_criteria( + Address, + ~Address.id.in_([5, 3]), + ) + ) + + u1 = sess.get(User, 7) + u2 = sess.get(User, 8) + eq_(u1.addresses, [Address(id=1)]) + eq_(u2.addresses, [Address(id=2), Address(id=4)]) + + for i in range(3): + sess.expire_all() + eq_(u1.addresses, [Address(id=1)]) + eq_(u2.addresses, [Address(id=2), Address(id=4)]) + def test_criteria_post_replace_legacy(self, user_address_fixture): User, Address = user_address_fixture From 02f50ded0b8897ab10f5c5db0336f1f3f68451ef Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 29 Mar 2022 09:48:24 -0400 Subject: [PATCH 177/632] use annotated entity when adding secondary Fixed regression in "dynamic" loader strategy where the :meth:`_orm.Query.filter_by` method would not be given an appropriate entity to filter from, in the case where a "secondary" table were present in the relationship being queried and the mapping were against something complex such as a "with polymorphic". 
Fixes: #7868 Change-Id: I3b82eec6485c5a92b56a596da0cfb009e9e67883 (cherry picked from commit a55476fbdbc9b4e192a052b81dfe7e750d6241e4) --- doc/build/changelog/unreleased_14/7868.rst | 9 +++ lib/sqlalchemy/orm/dynamic.py | 5 +- test/orm/test_dynamic.py | 70 +++++++++++++++++++++- 3 files changed, 82 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7868.rst diff --git a/doc/build/changelog/unreleased_14/7868.rst b/doc/build/changelog/unreleased_14/7868.rst new file mode 100644 index 00000000000..d57b22296e5 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7868.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, orm, regression + :tickets: 7868 + + Fixed regression in "dynamic" loader strategy where the + :meth:`_orm.Query.filter_by` method would not be given an appropriate + entity to filter from, in the case where a "secondary" table were present + in the relationship being queried and the mapping were against something + complex such as a "with polymorphic". diff --git a/lib/sqlalchemy/orm/dynamic.py b/lib/sqlalchemy/orm/dynamic.py index 5d74bbffd53..ec625601178 100644 --- a/lib/sqlalchemy/orm/dynamic.py +++ b/lib/sqlalchemy/orm/dynamic.py @@ -302,7 +302,10 @@ def __init__(self, attr, state): # is in the FROM. So we purposely put the mapper selectable # in _from_obj[0] to ensure a user-defined join() later on # doesn't fail, and secondary is then in _from_obj[1]. 
- self._from_obj = (prop.mapper.selectable, prop.secondary) + + # note also, we are using the official ORM-annotated selectable + # from __clause_element__(), see #7868 + self._from_obj = (prop.mapper.__clause_element__(), prop.secondary) self._where_criteria = ( prop._with_parent(instance, alias_secondary=False), diff --git a/test/orm/test_dynamic.py b/test/orm/test_dynamic.py index 2a8e3e2dc40..0cb4d76d9c2 100644 --- a/test/orm/test_dynamic.py +++ b/test/orm/test_dynamic.py @@ -1,10 +1,13 @@ from sqlalchemy import cast +from sqlalchemy import Column from sqlalchemy import desc from sqlalchemy import exc +from sqlalchemy import ForeignKey from sqlalchemy import func from sqlalchemy import inspect from sqlalchemy import Integer from sqlalchemy import select +from sqlalchemy import String from sqlalchemy import testing from sqlalchemy.orm import attributes from sqlalchemy.orm import backref @@ -13,6 +16,7 @@ from sqlalchemy.orm import noload from sqlalchemy.orm import Query from sqlalchemy.orm import relationship +from sqlalchemy.orm.session import make_transient_to_detached from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import assert_warns_message @@ -125,6 +129,8 @@ def _user_order_item_fixture(self): class DynamicTest(_DynamicFixture, _fixtures.FixtureTest, AssertsCompiledSQL): + __dialect__ = "default" + def test_basic(self): User, Address = self._user_address_fixture() sess = fixture_session() @@ -598,11 +604,17 @@ def test_secondary_as_join(self): ) }, ) - self.mapper_registry.map_imperatively(Item, items) + item_mapper = self.mapper_registry.map_imperatively(Item, items) sess = fixture_session() + u1 = sess.query(User).first() + dyn = u1.items + + # test for #7868 + eq_(dyn._from_obj[0]._annotations["parententity"], item_mapper) + self.assert_compile( u1.items, "SELECT items.id AS items_id, " @@ -614,6 +626,62 @@ def test_secondary_as_join(self): use_default_dialect=True, ) + def 
test_secondary_as_join_complex_entity(self, registry): + """integration test for #7868""" + Base = registry.generate_base() + + class GrandParent(Base): + __tablename__ = "grandparent" + id = Column(Integer, primary_key=True) + + grand_children = relationship( + "Child", secondary="parent", lazy="dynamic", viewonly=True + ) + + class Parent(Base): + __tablename__ = "parent" + id = Column(Integer, primary_key=True) + grand_parent_id = Column( + Integer, ForeignKey("grandparent.id"), nullable=False + ) + + class Child(Base): + __tablename__ = "child" + id = Column(Integer, primary_key=True) + type = Column(String) + parent_id = Column( + Integer, ForeignKey("parent.id"), nullable=False + ) + + __mapper_args__ = { + "polymorphic_on": type, + "polymorphic_identity": "unknown", + "with_polymorphic": "*", + } + + class SubChild(Child): + __tablename__ = "subchild" + id = Column(Integer, ForeignKey("child.id"), primary_key=True) + + __mapper_args__ = { + "polymorphic_identity": "sub", + } + + gp = GrandParent(id=1) + make_transient_to_detached(gp) + sess = fixture_session() + sess.add(gp) + self.assert_compile( + gp.grand_children.filter_by(id=1), + "SELECT child.id AS child_id, child.type AS child_type, " + "child.parent_id AS child_parent_id, subchild.id AS subchild_id " + "FROM parent, child LEFT OUTER JOIN subchild " + "ON child.id = subchild.id " + "WHERE :param_1 = parent.grand_parent_id " + "AND parent.id = child.parent_id AND child.id = :id_1", + {"id_1": 1}, + ) + def test_secondary_doesnt_interfere_w_join_to_fromlist(self): # tests that the "secondary" being added to the FROM # as part of [ticket:4349] does not prevent a subsequent join to From a4b3668f274a739d274b124d8302a58f254ebe45 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Tue, 29 Mar 2022 22:53:31 +0200 Subject: [PATCH 178/632] Fix spacing on ``bindparam`` docs Change-Id: I9daad5293a4887734f14e6e5de6014aca415453c --- lib/sqlalchemy/sql/elements.py | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index fbb02d9258c..2c424a5f54a 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -1458,7 +1458,7 @@ def __init__( supports empty lists. - .. seealso:: + .. seealso:: :ref:`coretutorial_bind_param` From d7247509bcff8574adc9e0e431ae24a4c51f7c7f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 30 Mar 2022 10:52:29 -0400 Subject: [PATCH 179/632] changelog fixes Change-Id: I24e3de82d656d59719fd067bc81b2948e725b487 (cherry picked from commit 731262b17e0678b2549bf5d41adfd9c82ced01cc) --- doc/build/changelog/unreleased_14/7799.rst | 2 +- doc/build/changelog/unreleased_14/7812.rst | 7 ++++--- doc/build/changelog/unreleased_14/7860.rst | 6 +++--- doc/build/core/metadata.rst | 8 ++++++++ lib/sqlalchemy/sql/schema.py | 10 +++++++++- 5 files changed, 25 insertions(+), 8 deletions(-) diff --git a/doc/build/changelog/unreleased_14/7799.rst b/doc/build/changelog/unreleased_14/7799.rst index 00254738ee9..8d88d23bc19 100644 --- a/doc/build/changelog/unreleased_14/7799.rst +++ b/doc/build/changelog/unreleased_14/7799.rst @@ -2,7 +2,7 @@ :tags: bug, orm :tickets: 7799 - Fixed issue where the :func:`_orm.polymorphic_selectin` loader option would + Fixed issue where the :func:`_orm.selectin_polymorphic` loader option would not work with joined inheritance mappers that don't have a fixed "polymorphic_on" column. Additionally added test support for a wider variety of usage patterns with this construct. 
\ No newline at end of file diff --git a/doc/build/changelog/unreleased_14/7812.rst b/doc/build/changelog/unreleased_14/7812.rst index 7e28428acfa..00c7d3834ab 100644 --- a/doc/build/changelog/unreleased_14/7812.rst +++ b/doc/build/changelog/unreleased_14/7812.rst @@ -2,6 +2,7 @@ :tags: bug, mssql, regression :tickets: 7812 - Fixed regression caused by :ticket:`7160` where FK reflection on a very - old database (compatibility level 80: SQL Server 2000) causes an - "Ambiguous column name" error. Patch courtesy of @Lin-Your. + Fixed regression caused by :ticket:`7160` where FK reflection in + conjunction with a low compatibility level setting (compatibility level 80: + SQL Server 2000) causes an "Ambiguous column name" error. Patch courtesy + @Lin-Your. diff --git a/doc/build/changelog/unreleased_14/7860.rst b/doc/build/changelog/unreleased_14/7860.rst index cb6bcc59fce..40ecf38fda4 100644 --- a/doc/build/changelog/unreleased_14/7860.rst +++ b/doc/build/changelog/unreleased_14/7860.rst @@ -2,10 +2,10 @@ :tags: usecase, schema :tickets: 7860 - Added support so that the :paramref:`.Table.tometadata.referred_schema_fn` + Added support so that the :paramref:`.Table.to_metadata.referred_schema_fn` callable passed to :meth:`.Table.to_metadata` may return the value - :data:`.BLANK_SCHEMA` to indicate that the referenced foreign key should be - reset to None. The :data.`RETAIN_SCHEMA` symbol may also be returned from + :attr:`.BLANK_SCHEMA` to indicate that the referenced foreign key should be + reset to None. The :attr:`RETAIN_SCHEMA` symbol may also be returned from this function to indicate "no change", which will behave the same as ``None`` currently does which also indicates no change. diff --git a/doc/build/core/metadata.rst b/doc/build/core/metadata.rst index c7316d1b650..a8fc2e47484 100644 --- a/doc/build/core/metadata.rst +++ b/doc/build/core/metadata.rst @@ -572,6 +572,14 @@ Column, Table, MetaData API .. versionadded:: 1.0.14 +.. 
attribute:: sqlalchemy.schema.RETAIN_SCHEMA + + Symbol indicating that a :class:`_schema.Table`, :class:`.Sequence` + or in some cases a :class:`_schema.ForeignKey` object, in situations + where the object is being copied for a :meth:`.MetaData.to_metadata` + operation, should retain the schema name that it already has. + + .. autoclass:: Column :members: diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py index 7c9fef644cf..0667370be66 100644 --- a/lib/sqlalchemy/sql/schema.py +++ b/lib/sqlalchemy/sql/schema.py @@ -58,7 +58,15 @@ from .. import util -RETAIN_SCHEMA = util.symbol("retain_schema") +RETAIN_SCHEMA = util.symbol( + "retain_schema" + """Symbol indicating that a :class:`_schema.Table`, :class:`.Sequence` + or in some cases a :class:`_schema.ForeignKey` object, in situations + where the object is being copied for a :meth:`.MetaData.to_metadata` + operation, should retain the schema name that it already has. + + """ +) BLANK_SCHEMA = util.symbol( "blank_schema", From aade1973639517dff06d9f5147c2ec67fc5e3a8d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 30 Mar 2022 11:44:23 -0400 Subject: [PATCH 180/632] further changelog / doc fixes Change-Id: I959f242272ff5147b7c1f721cf119d09309a8c57 (cherry picked from commit 9731484f5c991b56e64349ef7b9775ef83b18402) --- doc/build/changelog/unreleased_14/7860.rst | 2 +- doc/build/core/metadata.rst | 2 +- lib/sqlalchemy/sql/schema.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/build/changelog/unreleased_14/7860.rst b/doc/build/changelog/unreleased_14/7860.rst index 40ecf38fda4..cc41aa00bfb 100644 --- a/doc/build/changelog/unreleased_14/7860.rst +++ b/doc/build/changelog/unreleased_14/7860.rst @@ -5,7 +5,7 @@ Added support so that the :paramref:`.Table.to_metadata.referred_schema_fn` callable passed to :meth:`.Table.to_metadata` may return the value :attr:`.BLANK_SCHEMA` to indicate that the referenced foreign key should be - reset to None. 
The :attr:`RETAIN_SCHEMA` symbol may also be returned from + reset to None. The :attr:`.RETAIN_SCHEMA` symbol may also be returned from this function to indicate "no change", which will behave the same as ``None`` currently does which also indicates no change. diff --git a/doc/build/core/metadata.rst b/doc/build/core/metadata.rst index a8fc2e47484..7c5b7dd668d 100644 --- a/doc/build/core/metadata.rst +++ b/doc/build/core/metadata.rst @@ -576,7 +576,7 @@ Column, Table, MetaData API Symbol indicating that a :class:`_schema.Table`, :class:`.Sequence` or in some cases a :class:`_schema.ForeignKey` object, in situations - where the object is being copied for a :meth:`.MetaData.to_metadata` + where the object is being copied for a :meth:`.Table.to_metadata` operation, should retain the schema name that it already has. diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py index 0667370be66..621efd36ce1 100644 --- a/lib/sqlalchemy/sql/schema.py +++ b/lib/sqlalchemy/sql/schema.py @@ -62,7 +62,7 @@ "retain_schema" """Symbol indicating that a :class:`_schema.Table`, :class:`.Sequence` or in some cases a :class:`_schema.ForeignKey` object, in situations - where the object is being copied for a :meth:`.MetaData.to_metadata` + where the object is being copied for a :meth:`.Table.to_metadata` operation, should retain the schema name that it already has. """ From b1aaf0a29fdb950ac69964fb9f96dbc03cddd139 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Wed, 30 Mar 2022 22:50:18 +0200 Subject: [PATCH 181/632] Update bindparam cache key The ``literal_execute`` parameter now takes part of the cache generation of a bindparam, since it changes the sql string generated by the compiler. Previously the correct bind values were used, but the ``literal_execute`` would be ignored on subsequent executions of the same query. 
Fixes: #7876 Change-Id: I6bf887f1a2fe31f9d0ab68f5b4ff315004d006b2 (cherry picked from commit 429512d55e814b03854bc12ec541dbeee9e3b94e) --- doc/build/changelog/unreleased_14/7876.rst | 9 +++++++++ lib/sqlalchemy/sql/elements.py | 2 ++ test/sql/test_compare.py | 21 +++++++++++++++++++++ 3 files changed, 32 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7876.rst diff --git a/doc/build/changelog/unreleased_14/7876.rst b/doc/build/changelog/unreleased_14/7876.rst new file mode 100644 index 00000000000..c3b1c77b75c --- /dev/null +++ b/doc/build/changelog/unreleased_14/7876.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, sql + :tickets: 7876 + + The :paramref:`.bindparam.literal_execute` parameter now takes part + of the cache generation of a :func:`.bindparam`, since it changes + the sql string generated by the compiler. + Previously the correct bind values were used, but the ``literal_execute`` + would be ignored on subsequent executions of the same query. diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index 2c424a5f54a..81645ad0a0a 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -1230,6 +1230,7 @@ class BindParameter(roles.InElementRole, ColumnElement): ("type", InternalTraversal.dp_type), ("callable", InternalTraversal.dp_plain_dict), ("value", InternalTraversal.dp_plain_obj), + ("literal_execute", InternalTraversal.dp_boolean), ] _is_crud = False @@ -1663,6 +1664,7 @@ def _gen_cache_key(self, anon_map, bindparams): self.__class__, self.type._static_cache_key, self.key % anon_map if self._key_is_anon else self.key, + self.literal_execute, ) def _convert_to_unique(self): diff --git a/test/sql/test_compare.py b/test/sql/test_compare.py index a4684cccffa..26340d21d45 100644 --- a/test/sql/test_compare.py +++ b/test/sql/test_compare.py @@ -285,6 +285,7 @@ class CoreFixtures(object): ), lambda: ( bindparam("x"), + bindparam("x", literal_execute=True), bindparam("y"), bindparam("x", 
type_=Integer), bindparam("x", type_=String), @@ -1648,6 +1649,7 @@ def test_compare_labels(self): def test_compare_binds(self): b1 = bindparam("foo", type_=Integer()) + b1l = bindparam("foo", type_=Integer(), literal_execute=True) b2 = bindparam("foo", type_=Integer()) b3 = bindparam("foo", type_=String()) @@ -1658,6 +1660,9 @@ def c2(): return 6 b4 = bindparam("foo", type_=Integer(), callable_=c1) + b4l = bindparam( + "foo", type_=Integer(), callable_=c1, literal_execute=True + ) b5 = bindparam("foo", type_=Integer(), callable_=c2) b6 = bindparam("foo", type_=Integer(), callable_=c1) @@ -1678,6 +1683,22 @@ def c2(): is_false(b7.compare(b8)) is_true(b7.compare(b7)) + # cache key + def compare_key(left, right, expected): + lk = left._generate_cache_key().key + rk = right._generate_cache_key().key + is_(lk == rk, expected) + + compare_key(b1, b4, True) + compare_key(b1, b5, True) + compare_key(b8, b5, True) + compare_key(b8, b7, True) + compare_key(b8, b3, False) + compare_key(b1, b1l, False) + compare_key(b1, b4l, False) + compare_key(b4, b4l, False) + compare_key(b7, b4l, False) + def test_compare_tables(self): is_true(table_a.compare(table_a_2)) From df3b4522c0f9e199cb05fe1129f53473e7da26fb Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 31 Mar 2022 09:08:11 -0400 Subject: [PATCH 182/632] add close=False parameter to engine.dispose() Added new parameter :paramref:`.Engine.dispose.close`, defaulting to True. When False, the engine disposal does not touch the connections in the old pool at all, simply dropping the pool and replacing it. This use case is so that when the original pool is transferred from a parent process, the parent process may continue to use those connections. 
Fixes: #7877 Change-Id: I88b0808442381ba5e50674787cdb64f0e77d8b54 (cherry picked from commit 87a0f7183de4e8454483c7348bf486265bfe1c4d) --- doc/build/changelog/unreleased_14/7877.rst | 13 ++++ doc/build/core/connections.rst | 11 ++- doc/build/core/pooling.rst | 89 +++++++++++++--------- lib/sqlalchemy/engine/base.py | 47 +++++++----- test/engine/test_execute.py | 25 ++++++ 5 files changed, 131 insertions(+), 54 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7877.rst diff --git a/doc/build/changelog/unreleased_14/7877.rst b/doc/build/changelog/unreleased_14/7877.rst new file mode 100644 index 00000000000..d6ad6facd55 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7877.rst @@ -0,0 +1,13 @@ +.. change:: + :tags: usecase, engine + :tickets: 7877, 7815 + + Added new parameter :paramref:`.Engine.dispose.close`, defaulting to True. + When False, the engine disposal does not touch the connections in the old + pool at all, simply dropping the pool and replacing it. This use case is so + that when the original pool is transferred from a parent process, the + parent process may continue to use those connections. + + .. seealso:: + + :ref:`pooling_multiprocessing` - revised documentation diff --git a/doc/build/core/connections.rst b/doc/build/core/connections.rst index 6395c3c6b9c..3c2875bb4c1 100644 --- a/doc/build/core/connections.rst +++ b/doc/build/core/connections.rst @@ -1904,7 +1904,10 @@ Valid use cases for calling :meth:`_engine.Engine.dispose` include: :class:`_engine.Engine` object is copied to the child process, :meth:`_engine.Engine.dispose` should be called so that the engine creates brand new database connections local to that fork. Database connections - generally do **not** travel across process boundaries. + generally do **not** travel across process boundaries. Use the + :paramref:`.Engine.dispose.close` parameter set to False in this case. + See the section :ref:`pooling_multiprocessing` for more background on this + use case. 
* Within test suites or multitenancy scenarios where many ad-hoc, short-lived :class:`_engine.Engine` objects may be created and disposed. @@ -1929,6 +1932,12 @@ use of new connections, and means that when a connection is checked in, it is entirely closed out and is not held in memory. See :ref:`pool_switching` for guidelines on how to disable pooling. +.. seealso:: + + :ref:`pooling_toplevel` + + :ref:`pooling_multiprocessing` + .. _dbapi_connections: Working with Driver SQL and Raw DBAPI Connections diff --git a/doc/build/core/pooling.rst b/doc/build/core/pooling.rst index 6b2735a5d48..c6ef94a0a7a 100644 --- a/doc/build/core/pooling.rst +++ b/doc/build/core/pooling.rst @@ -479,45 +479,62 @@ are three general approaches to this: engine = create_engine("mysql://user:pass@host/dbname", poolclass=NullPool) -2. Call :meth:`_engine.Engine.dispose` on any given :class:`_engine.Engine` - **directly before** the new process is started, so that the new process - will create new connections, as well as not attempt to close connections that - were shared from the parent which can impact the parent's subsequent - use of those connections. **This is the recommended approach**:: - - engine = create_engine("mysql://user:pass@host/dbname") - - def run_in_process(): - with engine.connect() as conn: - conn.execute(text("...")) - - # before process starts, ensure engine.dispose() is called - engine.dispose() - p = Process(target=run_in_process) - p.start() - -3. Alternatively, if the :class:`_engine.Engine` is only to be used in - child processes, and will not be used from the parent process subsequent - to the creation of child forks, the dispose may be within the child process - right as it begins:: - - engine = create_engine("mysql+mysqldb://user:pass@host/dbname") - - def run_in_process(): - # process starts. ensure engine.dispose() is called just once - # at the beginning. note this cause parent process connections - # to be closed for most drivers - engine.dispose() +2. 
Call :meth:`_engine.Engine.dispose` on any given :class:`_engine.Engine`, + passing the :paramref:`.Engine.dispose.close` parameter with a value of + ``False``, within the initialize phase of the child process. This is + so that the new process will not touch any of the parent process' connections + and will instead start with new connections. + **This is the recommended approach**:: + + from multiprocessing import Pool + + engine = create_engine("mysql+mysqldb://user:pass@host/dbname") + + def run_in_process(some_data_record): + with engine.connect() as conn: + conn.execute(text("...")) + + def initializer(): + """ensure the parent proc's database connections are not touched + in the new connection pool""" + engine.dispose(close=False) + + with Pool(10, initializer=initializer) as p: + p.map(run_in_process, data) + - with engine.connect() as conn: - conn.execute(text("...")) + .. versionadded:: 1.4.33 Added the :paramref:`.Engine.dispose.close` + parameter to allow the replacement of a connection pool in a child + process without interfering with the connections used by the parent + process. - p = Process(target=run_in_process) - p.start() + To achieve the same "dispose without close" behavior prior to version + 1.4.33 (all SQLAlchemy versions), instead of calling + :meth:`.Engine.dispose`, replace the :class:`.Pool` directly using + :meth:`.Pool.recreate`:: - # after child process starts, "engine" above should not be used within - # the parent process for connectivity, without calling - # engine.dispose() first + engine.pool = engine.pool.recreate() + + The above code is equivalent to ``engine.dispose(close=False)`` with the + exception that the :meth:`.ConnectionEvents.engine_disposed` end-user + event hook is not invoked; assuming end-user code is not making use of + this hook, this workaround has no other negative effects. + +3. Call :meth:`.Engine.dispose` **directly before** the child process is + created. 
This will also cause the child process to start with a new + connection pool, while ensuring the parent connections are not transferred + to the child process:: + + engine = create_engine("mysql://user:pass@host/dbname") + + def run_in_process(): + with engine.connect() as conn: + conn.execute(text("...")) + + # before process starts, ensure engine.dispose() is called + engine.dispose() + p = Process(target=run_in_process) + p.start() 4. An event handler can be applied to the connection pool that tests for connections being shared across process boundaries, and invalidates them:: diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index b5a3096e5bb..eca4a9e10aa 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -2948,32 +2948,45 @@ def driver(self): def __repr__(self): return "Engine(%r)" % (self.url,) - def dispose(self): + def dispose(self, close=True): """Dispose of the connection pool used by this :class:`_engine.Engine`. - This has the effect of fully closing all **currently checked in** - database connections. Connections that are still checked out - will **not** be closed, however they will no longer be associated - with this :class:`_engine.Engine`, - so when they are closed individually, - eventually the :class:`_pool.Pool` which they are associated with will - be garbage collected and they will be closed out fully, if - not already closed on checkin. - - A new connection pool is created immediately after the old one has - been disposed. This new pool, like all SQLAlchemy connection pools, - does not make any actual connections to the database until one is - first requested, so as long as the :class:`_engine.Engine` - isn't used again, - no new connections will be made. + A new connection pool is created immediately after the old one has been + disposed. 
The previous connection pool is disposed either actively, by + closing out all currently checked-in connections in that pool, or + passively, by losing references to it but otherwise not closing any + connections. The latter strategy is more appropriate for an initializer + in a forked Python process. + + :param close: if left at its default of ``True``, has the + effect of fully closing all **currently checked in** + database connections. Connections that are still checked out + will **not** be closed, however they will no longer be associated + with this :class:`_engine.Engine`, + so when they are closed individually, eventually the + :class:`_pool.Pool` which they are associated with will + be garbage collected and they will be closed out fully, if + not already closed on checkin. + + If set to ``False``, the previous connection pool is de-referenced, + and otherwise not touched in any way. + + .. versionadded:: 1.4.33 Added the :paramref:`.Engine.dispose.close` + parameter to allow the replacement of a connection pool in a child + process without interfering with the connections used by the parent + process. + .. 
seealso:: :ref:`engine_disposal` + :ref:`pooling_multiprocessing` + """ - self.pool.dispose() + if close: + self.pool.dispose() self.pool = self.pool.recreate() self.dispatch.engine_disposed(self) diff --git a/test/engine/test_execute.py b/test/engine/test_execute.py index f462a7035cc..2a61bd1f636 100644 --- a/test/engine/test_execute.py +++ b/test/engine/test_execute.py @@ -1,6 +1,7 @@ # coding: utf-8 from contextlib import contextmanager +import copy import re import threading import weakref @@ -2191,6 +2192,30 @@ def test_dispose_event(self, testing_engine): eq_(canary.mock_calls, [call(eng), call(eng)]) + @testing.requires.ad_hoc_engines + @testing.combinations(True, False, argnames="close") + def test_close_parameter(self, testing_engine, close): + eng = testing_engine( + options=dict(pool_size=1, max_overflow=0, poolclass=QueuePool) + ) + + conn = eng.connect() + dbapi_conn_one = conn.connection.dbapi_connection + conn.close() + + eng_copy = copy.copy(eng) + eng_copy.dispose(close=close) + copy_conn = eng_copy.connect() + dbapi_conn_two = copy_conn.connection.dbapi_connection + + is_not(dbapi_conn_one, dbapi_conn_two) + + conn = eng.connect() + if close: + is_not(dbapi_conn_one, conn.connection.dbapi_connection) + else: + is_(dbapi_conn_one, conn.connection.dbapi_connection) + def test_retval_flag(self): canary = [] From 9ed013bbc391fa3a7fcb82a62371c90d3d464158 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 31 Mar 2022 10:07:16 -0400 Subject: [PATCH 183/632] - 1.4.33 --- doc/build/changelog/changelog_14.rst | 180 ++++++++++++++++++++- doc/build/changelog/unreleased_14/5463.rst | 8 - doc/build/changelog/unreleased_14/7798.rst | 9 -- doc/build/changelog/unreleased_14/7799.rst | 8 - doc/build/changelog/unreleased_14/7801.rst | 8 - doc/build/changelog/unreleased_14/7805.rst | 9 -- doc/build/changelog/unreleased_14/7812.rst | 8 - doc/build/changelog/unreleased_14/7823.rst | 9 -- doc/build/changelog/unreleased_14/7827.rst | 10 -- 
doc/build/changelog/unreleased_14/7842.rst | 12 -- doc/build/changelog/unreleased_14/7845.rst | 11 -- doc/build/changelog/unreleased_14/7853.rst | 10 -- doc/build/changelog/unreleased_14/7860.rst | 11 -- doc/build/changelog/unreleased_14/7861.rst | 10 -- doc/build/changelog/unreleased_14/7862.rst | 7 - doc/build/changelog/unreleased_14/7868.rst | 9 -- doc/build/changelog/unreleased_14/7876.rst | 9 -- doc/build/changelog/unreleased_14/7877.rst | 13 -- doc/build/conf.py | 4 +- 19 files changed, 181 insertions(+), 164 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/5463.rst delete mode 100644 doc/build/changelog/unreleased_14/7798.rst delete mode 100644 doc/build/changelog/unreleased_14/7799.rst delete mode 100644 doc/build/changelog/unreleased_14/7801.rst delete mode 100644 doc/build/changelog/unreleased_14/7805.rst delete mode 100644 doc/build/changelog/unreleased_14/7812.rst delete mode 100644 doc/build/changelog/unreleased_14/7823.rst delete mode 100644 doc/build/changelog/unreleased_14/7827.rst delete mode 100644 doc/build/changelog/unreleased_14/7842.rst delete mode 100644 doc/build/changelog/unreleased_14/7845.rst delete mode 100644 doc/build/changelog/unreleased_14/7853.rst delete mode 100644 doc/build/changelog/unreleased_14/7860.rst delete mode 100644 doc/build/changelog/unreleased_14/7861.rst delete mode 100644 doc/build/changelog/unreleased_14/7862.rst delete mode 100644 doc/build/changelog/unreleased_14/7868.rst delete mode 100644 doc/build/changelog/unreleased_14/7876.rst delete mode 100644 doc/build/changelog/unreleased_14/7877.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index fa70f560645..322c96000d9 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,185 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.33 - :include_notes_from: unreleased_14 + :released: March 31, 2022 + + .. 
change:: + :tags: bug, engine + :tickets: 7853 + + Further clarified connection-level logging to indicate the BEGIN, ROLLBACK + and COMMIT log messages do not actually indicate a real transaction when + the AUTOCOMMIT isolation level is in use; messaging has been extended to + include the BEGIN message itself, and the messaging has also been fixed to + accommodate when the :class:`.Engine` level + :paramref:`.create_engine.isolation_level` parameter was used directly. + + .. change:: + :tags: bug, mssql, regression + :tickets: 7812 + + Fixed regression caused by :ticket:`7160` where FK reflection in + conjunction with a low compatibility level setting (compatibility level 80: + SQL Server 2000) causes an "Ambiguous column name" error. Patch courtesy + @Lin-Your. + + .. change:: + :tags: usecase, schema + :tickets: 7860 + + Added support so that the :paramref:`.Table.to_metadata.referred_schema_fn` + callable passed to :meth:`.Table.to_metadata` may return the value + :attr:`.BLANK_SCHEMA` to indicate that the referenced foreign key should be + reset to None. The :attr:`.RETAIN_SCHEMA` symbol may also be returned from + this function to indicate "no change", which will behave the same as + ``None`` currently does which also indicates no change. + + + .. change:: + :tags: bug, sqlite, reflection + :tickets: 5463 + + Fixed bug where the name of CHECK constraints under SQLite would not be + reflected if the name were created using quotes, as is the case when the + name uses mixed case or special characters. + + + .. change:: + :tags: bug, orm, regression + :tickets: 7868 + + Fixed regression in "dynamic" loader strategy where the + :meth:`_orm.Query.filter_by` method would not be given an appropriate + entity to filter from, in the case where a "secondary" table were present + in the relationship being queried and the mapping were against something + complex such as a "with polymorphic". + + .. 
change:: + :tags: bug, orm + :tickets: 7801 + + Fixed bug where :func:`_orm.composite` attributes would not work in + conjunction with the :func:`_orm.selectin_polymorphic` loader strategy for + joined table inheritance. + + + .. change:: + :tags: bug, orm, performance + :tickets: 7823 + + Improvements in memory usage by the ORM, removing a significant set of + intermediary expression objects that are typically stored when a copy of an + expression object is created. These clones have been greatly reduced, + reducing the number of total expression objects stored in memory by + ORM mappings by about 30%. + + .. change:: + :tags: usecase, orm + :tickets: 7805 + + Added :paramref:`_orm.with_polymorphic.adapt_on_names` to the + :func:`_orm.with_polymorphic` function, which allows a polymorphic load + (typically with concrete mapping) to be stated against an alternative + selectable that will adapt to the original mapped selectable on column + names alone. + + .. change:: + :tags: usecase, sql + :tickets: 7845 + + Added new parameter + :paramref:`.FunctionElement.table_valued.joins_implicitly`, for the + :meth:`.FunctionElement.table_valued` construct. This parameter indicates + that the given table-valued function implicitly joins to the table it + refers towards, essentially disabling the "from linting" feature, i.e. the + "cartesian product" warning, from taking effect due to the presence of this + parameter. May be used for functions such as ``func.json_each()``. + + .. change:: + :tags: usecase, engine + :tickets: 7877, 7815 + + Added new parameter :paramref:`.Engine.dispose.close`, defaulting to True. + When False, the engine disposal does not touch the connections in the old + pool at all, simply dropping the pool and replacing it. This use case is so + that when the original pool is transferred from a parent process, the + parent process may continue to use those connections. + + .. seealso:: + + :ref:`pooling_multiprocessing` - revised documentation + + .. 
change:: + :tags: bug, orm + :tickets: 7799 + + Fixed issue where the :func:`_orm.selectin_polymorphic` loader option would + not work with joined inheritance mappers that don't have a fixed + "polymorphic_on" column. Additionally added test support for a wider + variety of usage patterns with this construct. + + .. change:: + :tags: usecase, orm + :tickets: 7861 + + Added new attributes :attr:`.UpdateBase.returning_column_descriptions` and + :attr:`.UpdateBase.entity_description` to allow for inspection of ORM + attributes and entities that are installed as part of an :class:`.Insert`, + :class:`.Update`, or :class:`.Delete` construct. The + :attr:`.Select.column_descriptions` accessor is also now implemented for + Core-only selectables. + + .. change:: + :tags: bug, sql + :tickets: 7876 + + The :paramref:`.bindparam.literal_execute` parameter now takes part + of the cache generation of a :func:`.bindparam`, since it changes + the sql string generated by the compiler. + Previously the correct bind values were used, but the ``literal_execute`` + would be ignored on subsequent executions of the same query. + + .. change:: + :tags: bug, orm + :tickets: 7862 + + Fixed bug in :func:`_orm.with_loader_criteria` function where loader + criteria would not be applied to a joined eager load that were invoked + within the scope of a refresh operation for the parent object. + + .. change:: + :tags: bug, orm + :tickets: 7842 + + Fixed issue where the :class:`_orm.Mapper` would reduce a user-defined + :paramref:`_orm.Mapper.primary_key` argument too aggressively, in the case + of mapping to a ``UNION`` where for some of the SELECT entries, two columns + are essentially equivalent, but in another, they are not, such as in a + recursive CTE. The logic here has been changed to accept a given + user-defined PK as given, where columns will be related to the mapped + selectable but no longer "reduced" as this heuristic can't accommodate for + all situations. + + .. 
change:: + :tags: bug, ext + :tickets: 7827 + + Improved the error message that's raised for the case where the + :func:`.association_proxy` construct attempts to access a target attribute + at the class level, and this access fails. The particular use case here is + when proxying to a hybrid attribute that does not include a working + class-level implementation. + + + .. change:: + :tags: bug, sql, regression + :tickets: 7798 + + Fixed regression caused by :ticket:`7760` where the new capabilities of + :class:`.TextualSelect` were not fully implemented within the compiler + properly, leading to issues with composed INSERT constructs such as "INSERT + FROM SELECT" and "INSERT...ON CONFLICT" when combined with CTE and textual + statements. .. changelog:: :version: 1.4.32 diff --git a/doc/build/changelog/unreleased_14/5463.rst b/doc/build/changelog/unreleased_14/5463.rst deleted file mode 100644 index 5de6182acf5..00000000000 --- a/doc/build/changelog/unreleased_14/5463.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, sqlite, reflection - :tickets: 5463 - - Fixed bug where the name of CHECK constraints under SQLite would not be - reflected if the name were created using quotes, as is the case when the - name uses mixed case or special characters. - diff --git a/doc/build/changelog/unreleased_14/7798.rst b/doc/build/changelog/unreleased_14/7798.rst deleted file mode 100644 index 31a5bb2e421..00000000000 --- a/doc/build/changelog/unreleased_14/7798.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, sql, regression - :tickets: 7798 - - Fixed regression caused by :ticket:`7760` where the new capabilities of - :class:`.TextualSelect` were not fully implemented within the compiler - properly, leading to issues with composed INSERT constructs such as "INSERT - FROM SELECT" and "INSERT...ON CONFLICT" when combined with CTE and textual - statements. 
diff --git a/doc/build/changelog/unreleased_14/7799.rst b/doc/build/changelog/unreleased_14/7799.rst deleted file mode 100644 index 8d88d23bc19..00000000000 --- a/doc/build/changelog/unreleased_14/7799.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 7799 - - Fixed issue where the :func:`_orm.selectin_polymorphic` loader option would - not work with joined inheritance mappers that don't have a fixed - "polymorphic_on" column. Additionally added test support for a wider - variety of usage patterns with this construct. \ No newline at end of file diff --git a/doc/build/changelog/unreleased_14/7801.rst b/doc/build/changelog/unreleased_14/7801.rst deleted file mode 100644 index 4df3bdf8764..00000000000 --- a/doc/build/changelog/unreleased_14/7801.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 7801 - - Fixed bug where :func:`_orm.composite` attributes would not work in - conjunction with the :func:`_orm.selectin_polymorphic` loader strategy for - joined table inheritance. - diff --git a/doc/build/changelog/unreleased_14/7805.rst b/doc/build/changelog/unreleased_14/7805.rst deleted file mode 100644 index 2d2940239a6..00000000000 --- a/doc/build/changelog/unreleased_14/7805.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: usecase, orm - :tickets: 7805 - - Added :paramref:`_orm.with_polymorphic.adapt_on_names` to the - :func:`_orm.with_polymorphic` function, which allows a polymorphic load - (typically with concrete mapping) to be stated against an alternative - selectable that will adapt to the original mapped selectable on column - names alone. diff --git a/doc/build/changelog/unreleased_14/7812.rst b/doc/build/changelog/unreleased_14/7812.rst deleted file mode 100644 index 00c7d3834ab..00000000000 --- a/doc/build/changelog/unreleased_14/7812.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. 
change:: - :tags: bug, mssql, regression - :tickets: 7812 - - Fixed regression caused by :ticket:`7160` where FK reflection in - conjunction with a low compatibility level setting (compatibility level 80: - SQL Server 2000) causes an "Ambiguous column name" error. Patch courtesy - @Lin-Your. diff --git a/doc/build/changelog/unreleased_14/7823.rst b/doc/build/changelog/unreleased_14/7823.rst deleted file mode 100644 index 249a749d027..00000000000 --- a/doc/build/changelog/unreleased_14/7823.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, orm, performance - :tickets: 7823 - - Improvements in memory usage by the ORM, removing a significant set of - intermediary expression objects that are typically stored when a copy of an - expression object is created. These clones have been greatly reduced, - reducing the number of total expression objects stored in memory by - ORM mappings by about 30%. diff --git a/doc/build/changelog/unreleased_14/7827.rst b/doc/build/changelog/unreleased_14/7827.rst deleted file mode 100644 index aedf25809d7..00000000000 --- a/doc/build/changelog/unreleased_14/7827.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. change:: - :tags: bug, ext - :tickets: 7827 - - Improved the error message that's raised for the case where the - :func:`.association_proxy` construct attempts to access a target attribute - at the class level, and this access fails. The particular use case here is - when proxying to a hybrid attribute that does not include a working - class-level implementation. - diff --git a/doc/build/changelog/unreleased_14/7842.rst b/doc/build/changelog/unreleased_14/7842.rst deleted file mode 100644 index c165ed44bff..00000000000 --- a/doc/build/changelog/unreleased_14/7842.rst +++ /dev/null @@ -1,12 +0,0 @@ -.. 
change:: - :tags: bug, orm - :tickets: 7842 - - Fixed issue where the :class:`_orm.Mapper` would reduce a user-defined - :paramref:`_orm.Mapper.primary_key` argument too aggressively, in the case - of mapping to a ``UNION`` where for some of the SELECT entries, two columns - are essentially equivalent, but in another, they are not, such as in a - recursive CTE. The logic here has been changed to accept a given - user-defined PK as given, where columns will be related to the mapped - selectable but no longer "reduced" as this heuristic can't accommodate for - all situations. diff --git a/doc/build/changelog/unreleased_14/7845.rst b/doc/build/changelog/unreleased_14/7845.rst deleted file mode 100644 index 1cfa9cdf6bb..00000000000 --- a/doc/build/changelog/unreleased_14/7845.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. change:: - :tags: usecase, sql - :tickets: 7845 - - Added new parameter - :paramref:`.FunctionElement.table_valued.joins_implicitly`, for the - :meth:`.FunctionElement.table_valued` construct. This parameter indicates - that the given table-valued function implicitly joins to the table it - refers towards, essentially disabling the "from linting" feature, i.e. the - "cartesian product" warning, from taking effect due to the presence of this - parameter. May be used for functions such as ``func.json_each()``. diff --git a/doc/build/changelog/unreleased_14/7853.rst b/doc/build/changelog/unreleased_14/7853.rst deleted file mode 100644 index 66856c29e03..00000000000 --- a/doc/build/changelog/unreleased_14/7853.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. 
change:: - :tags: bug, engine - :tickets: 7853 - - Further clarified connection-level logging to indicate the BEGIN, ROLLBACK - and COMMIT log messages do not actually indicate a real transaction when - the AUTOCOMMIT isolation level is in use; messaging has been extended to - include the BEGIN message itself, and the messaging has also been fixed to - accommodate when the :class:`.Engine` level - :paramref:`.create_engine.isolation_level` parameter was used directly. diff --git a/doc/build/changelog/unreleased_14/7860.rst b/doc/build/changelog/unreleased_14/7860.rst deleted file mode 100644 index cc41aa00bfb..00000000000 --- a/doc/build/changelog/unreleased_14/7860.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. change:: - :tags: usecase, schema - :tickets: 7860 - - Added support so that the :paramref:`.Table.to_metadata.referred_schema_fn` - callable passed to :meth:`.Table.to_metadata` may return the value - :attr:`.BLANK_SCHEMA` to indicate that the referenced foreign key should be - reset to None. The :attr:`.RETAIN_SCHEMA` symbol may also be returned from - this function to indicate "no change", which will behave the same as - ``None`` currently does which also indicates no change. - diff --git a/doc/build/changelog/unreleased_14/7861.rst b/doc/build/changelog/unreleased_14/7861.rst deleted file mode 100644 index 49ac82ad84a..00000000000 --- a/doc/build/changelog/unreleased_14/7861.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. change:: - :tags: usecase, orm - :tickets: 7861 - - Added new attributes :attr:`.UpdateBase.returning_column_descriptions` and - :attr:`.UpdateBase.entity_description` to allow for inspection of ORM - attributes and entities that are installed as part of an :class:`.Insert`, - :class:`.Update`, or :class:`.Delete` construct. The - :attr:`.Select.column_descriptions` accessor is also now implemented for - Core-only selectables. 
diff --git a/doc/build/changelog/unreleased_14/7862.rst b/doc/build/changelog/unreleased_14/7862.rst deleted file mode 100644 index 00252ec8dd3..00000000000 --- a/doc/build/changelog/unreleased_14/7862.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 7862 - - Fixed bug in :func:`_orm.with_loader_criteria` function where loader - criteria would not be applied to a joined eager load that were invoked - within the scope of a refresh operation for the parent object. diff --git a/doc/build/changelog/unreleased_14/7868.rst b/doc/build/changelog/unreleased_14/7868.rst deleted file mode 100644 index d57b22296e5..00000000000 --- a/doc/build/changelog/unreleased_14/7868.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, orm, regression - :tickets: 7868 - - Fixed regression in "dynamic" loader strategy where the - :meth:`_orm.Query.filter_by` method would not be given an appropriate - entity to filter from, in the case where a "secondary" table were present - in the relationship being queried and the mapping were against something - complex such as a "with polymorphic". diff --git a/doc/build/changelog/unreleased_14/7876.rst b/doc/build/changelog/unreleased_14/7876.rst deleted file mode 100644 index c3b1c77b75c..00000000000 --- a/doc/build/changelog/unreleased_14/7876.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 7876 - - The :paramref:`.bindparam.literal_execute` parameter now takes part - of the cache generation of a :func:`.bindparam`, since it changes - the sql string generated by the compiler. - Previously the correct bind values were used, but the ``literal_execute`` - would be ignored on subsequent executions of the same query. diff --git a/doc/build/changelog/unreleased_14/7877.rst b/doc/build/changelog/unreleased_14/7877.rst deleted file mode 100644 index d6ad6facd55..00000000000 --- a/doc/build/changelog/unreleased_14/7877.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. 
change:: - :tags: usecase, engine - :tickets: 7877, 7815 - - Added new parameter :paramref:`.Engine.dispose.close`, defaulting to True. - When False, the engine disposal does not touch the connections in the old - pool at all, simply dropping the pool and replacing it. This use case is so - that when the original pool is transferred from a parent process, the - parent process may continue to use those connections. - - .. seealso:: - - :ref:`pooling_multiprocessing` - revised documentation diff --git a/doc/build/conf.py b/doc/build/conf.py index 7f55f5208fc..591478dacdf 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -213,9 +213,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. -release = "1.4.32" +release = "1.4.33" -release_date = "March 6, 2022" +release_date = "March 31, 2022" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From b47af2637376d62d11e592f97aa68c375f9202d0 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 31 Mar 2022 10:38:05 -0400 Subject: [PATCH 184/632] Version 1.4.34 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 322c96000d9..3b1b051387c 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.34 + :include_notes_from: unreleased_14 + .. 
changelog:: :version: 1.4.33 :released: March 31, 2022 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index fc34b3ab262..9610e0a66f7 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.33" +__version__ = "1.4.34" def __go(lcls): From e6d783b293095030c864292ed3c67402497c4174 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 31 Mar 2022 14:56:52 -0400 Subject: [PATCH 185/632] add template methods for ORMInsert Fixed regression caused by :ticket:`7861` where invoking an :class:`.Insert` construct which contained ORM entities via :meth:`_orm.Session.execute` would fail. Fixes: #7878 Change-Id: Icc4d8028249cc417f504fdd3e31e206b5bbc89f8 (cherry picked from commit cbe38dbc667436f5da74ce7c3d6e5451f41c62e2) --- doc/build/changelog/unreleased_14/7878.rst | 7 ++++++ lib/sqlalchemy/orm/persistence.py | 27 +++++++++++++++++++- test/orm/test_session.py | 29 ++++++++++++++++++++++ 3 files changed, 62 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/7878.rst diff --git a/doc/build/changelog/unreleased_14/7878.rst b/doc/build/changelog/unreleased_14/7878.rst new file mode 100644 index 00000000000..6c9e92929e8 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7878.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: bug, orm, regression + :tickets: 7878 + + Fixed regression caused by :ticket:`7861` where invoking an + :class:`.Insert` construct which contained ORM entities directly via + :meth:`_orm.Session.execute` would fail. 
diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index c3d2cfd49c9..654e659f411 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ b/lib/sqlalchemy/orm/persistence.py @@ -2186,7 +2186,32 @@ def _attr_for_col(c, ent): @CompileState.plugin_for("orm", "insert") class ORMInsert(ORMDMLState, InsertDMLState): - pass + @classmethod + def orm_pre_session_exec( + cls, + session, + statement, + params, + execution_options, + bind_arguments, + is_reentrant_invoke, + ): + return ( + statement, + util.immutabledict(execution_options), + ) + + @classmethod + def orm_setup_cursor_result( + cls, + session, + statement, + params, + execution_options, + bind_arguments, + result, + ): + return result @CompileState.plugin_for("orm", "update") diff --git a/test/orm/test_session.py b/test/orm/test_session.py index 295fd8205f3..83ce629700c 100644 --- a/test/orm/test_session.py +++ b/test/orm/test_session.py @@ -1,8 +1,10 @@ import inspect as _py_inspect import sqlalchemy as sa +from sqlalchemy import delete from sqlalchemy import event from sqlalchemy import ForeignKey +from sqlalchemy import insert from sqlalchemy import inspect from sqlalchemy import Integer from sqlalchemy import select @@ -10,6 +12,7 @@ from sqlalchemy import String from sqlalchemy import testing from sqlalchemy import text +from sqlalchemy import update from sqlalchemy.orm import attributes from sqlalchemy.orm import backref from sqlalchemy.orm import close_all_sessions @@ -2181,6 +2184,32 @@ def test_unbuffered_result_session_is_closed(self, meth): ): result.all() + @testing.combinations("insert", "update", "delete", argnames="dml_expr") + @testing.combinations("core", "orm", argnames="coreorm") + def test_dml_execute(self, dml_expr, coreorm): + User = self.classes.User + users = self.tables.users + + sess = fixture_session() + + if coreorm == "orm": + if dml_expr == "insert": + stmt = insert(User).values(id=12, name="some user") + elif dml_expr == "update": + stmt = 
update(User).values(name="sone name").filter_by(id=15) + else: + stmt = delete(User).filter_by(id=15) + else: + if dml_expr == "insert": + stmt = insert(users).values(id=12, name="some user") + elif dml_expr == "update": + stmt = update(users).values(name="sone name").filter_by(id=15) + else: + stmt = delete(users).filter_by(id=15) + + result = sess.execute(stmt) + result.close() + @testing.combinations((True,), (False,), argnames="prebuffered") @testing.combinations(("close",), ("expunge_all",), argnames="meth") def test_unbuffered_result_before_session_is_closed( From 00b37e1462ad6aa4b85d3eec7d07a90a7b6c480c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 31 Mar 2022 16:53:43 -0400 Subject: [PATCH 186/632] allow executemany values for ON CONFLICT DO NOTHING Scaled back a fix made for :ticket:`6581` where "executemany values" mode for psycopg2 were disabled for all "ON CONFLICT" styles of INSERT, to not apply to the "ON CONFLICT DO NOTHING" clause, which does not include any parameters and is safe for "executemany values" mode. "ON CONFLICT DO UPDATE" is still blocked from "executemany values" as there may be additional parameters in the DO UPDATE clause that cannot be batched (which is the original issue fixed by :ticket:`6581`). 
Fixes: #7880 Change-Id: Id3e23a0c6699333409a50148fa8923cb8e564bdc (cherry picked from commit cc8c5835a92b0035530b541c81b0c714b570b095) --- doc/build/changelog/unreleased_14/7880.rst | 11 ++++++++++ lib/sqlalchemy/dialects/postgresql/base.py | 19 ++++++++++++++++++ .../dialects/postgresql/psycopg2.py | 2 +- lib/sqlalchemy/sql/compiler.py | 9 +-------- test/dialect/postgresql/test_dialect.py | 20 +++++++++++++++++-- 5 files changed, 50 insertions(+), 11 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7880.rst diff --git a/doc/build/changelog/unreleased_14/7880.rst b/doc/build/changelog/unreleased_14/7880.rst new file mode 100644 index 00000000000..9abbac1cfa8 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7880.rst @@ -0,0 +1,11 @@ +.. change:: + :tags: bug, postgresql + :tickets: 7880 + + Scaled back a fix made for :ticket:`6581` where "executemany values" mode + for psycopg2 were disabled for all "ON CONFLICT" styles of INSERT, to + not apply to the "ON CONFLICT DO NOTHING" clause, which does not include + any parameters and is safe for "executemany values" mode. "ON CONFLICT + DO UPDATE" is still blocked from "executemany values" as there may + be additional parameters in the DO UPDATE clause that cannot be batched + (which is the original issue fixed by :ticket:`6581`). diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index 37b2113e989..4f63002bc9b 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -1429,6 +1429,7 @@ def bind_expression(self, bindvalue): from uuid import UUID as _python_UUID from . import array as _array +from . import dml from . import hstore as _hstore from . import json as _json from . 
import ranges as _ranges @@ -2463,6 +2464,24 @@ def _on_conflict_target(self, clause, **kw): return target_text + @util.memoized_property + def _is_safe_for_fast_insert_values_helper(self): + # don't allow fast executemany if _post_values_clause is + # present and is not an OnConflictDoNothing. what this means + # concretely is that the + # "fast insert executemany helper" won't be used, in other + # words we won't convert "executemany()" of many parameter + # sets into a single INSERT with many elements in VALUES. + # We can't apply that optimization safely if for example the + # statement includes a clause like "ON CONFLICT DO UPDATE" + + return self.insert_single_values_expr is not None and ( + self.statement._post_values_clause is None + or isinstance( + self.statement._post_values_clause, dml.OnConflictDoNothing + ) + ) + def visit_on_conflict_do_nothing(self, on_conflict, **kw): target_text = self._on_conflict_target(on_conflict, **kw) diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py index 19d7b06ac9f..adebc9b6769 100644 --- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py +++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py @@ -927,7 +927,7 @@ def do_executemany(self, cursor, statement, parameters, context=None): self.executemany_mode & EXECUTEMANY_VALUES and context and context.isinsert - and context.compiled.insert_single_values_expr + and context.compiled._is_safe_for_fast_insert_values_helper ): executemany_values = ( "(%s)" % context.compiled.insert_single_values_expr diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 671ca674924..7393629a406 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -3930,14 +3930,7 @@ def visit_insert(self, insert_stmt, **kw): [value for c, expr, value in crud_params] ) text += " VALUES (%s)" % insert_single_values_expr - if toplevel and insert_stmt._post_values_clause is None: - # don't assign 
insert_single_values_expr if _post_values_clause - # is present. what this means concretely is that the - # "fast insert executemany helper" won't be used, in other - # words we won't convert "executemany()" of many parameter - # sets into a single INSERT with many elements in VALUES. - # We can't apply that optimization safely if for example the - # statement includes a clause like "ON CONFLICT DO UPDATE" + if toplevel: self.insert_single_values_expr = insert_single_values_expr if insert_stmt._post_values_clause is not None: diff --git a/test/dialect/postgresql/test_dialect.py b/test/dialect/postgresql/test_dialect.py index c0eb4410cf9..1d797a697d7 100644 --- a/test/dialect/postgresql/test_dialect.py +++ b/test/dialect/postgresql/test_dialect.py @@ -30,6 +30,7 @@ from sqlalchemy import TypeDecorator from sqlalchemy import util from sqlalchemy.dialects.postgresql import base as postgresql +from sqlalchemy.dialects.postgresql import insert as pg_insert from sqlalchemy.dialects.postgresql import psycopg2 as psycopg2_dialect from sqlalchemy.dialects.postgresql.psycopg2 import EXECUTEMANY_BATCH from sqlalchemy.dialects.postgresql.psycopg2 import EXECUTEMANY_PLAIN @@ -357,7 +358,10 @@ def define_tables(cls, metadata): Column(ue("\u6e2c\u8a66"), Integer), ) - def test_insert(self, connection): + @testing.combinations( + "insert", "pg_insert", "pg_insert_on_conflict", argnames="insert_type" + ) + def test_insert(self, connection, insert_type): from psycopg2 import extras values_page_size = connection.dialect.executemany_values_page_size @@ -377,11 +381,23 @@ def test_insert(self, connection): else: assert False + if insert_type == "pg_insert_on_conflict": + stmt += " ON CONFLICT DO NOTHING" + with mock.patch.object( extras, meth.__name__, side_effect=meth ) as mock_exec: + if insert_type == "insert": + ins_stmt = self.tables.data.insert() + elif insert_type == "pg_insert": + ins_stmt = pg_insert(self.tables.data) + elif insert_type == "pg_insert_on_conflict": + ins_stmt = 
pg_insert(self.tables.data).on_conflict_do_nothing() + else: + assert False + connection.execute( - self.tables.data.insert(), + ins_stmt, [ {"x": "x1", "y": "y1"}, {"x": "x2", "y": "y2"}, From faa23f1729ec86ee1e0d7ba4d0fd30fb331f867b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 31 Mar 2022 18:10:43 -0400 Subject: [PATCH 187/632] - 1.4.34 --- doc/build/changelog/changelog_14.rst | 22 +++++++++++++++++++++- doc/build/changelog/unreleased_14/7878.rst | 7 ------- doc/build/changelog/unreleased_14/7880.rst | 11 ----------- doc/build/conf.py | 2 +- 4 files changed, 22 insertions(+), 20 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/7878.rst delete mode 100644 doc/build/changelog/unreleased_14/7880.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 3b1b051387c..80205285ac2 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,27 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.34 - :include_notes_from: unreleased_14 + :released: March 31, 2022 + + .. change:: + :tags: bug, orm, regression + :tickets: 7878 + + Fixed regression caused by :ticket:`7861` where invoking an + :class:`.Insert` construct which contained ORM entities directly via + :meth:`_orm.Session.execute` would fail. + + .. change:: + :tags: bug, postgresql + :tickets: 7880 + + Scaled back a fix made for :ticket:`6581` where "executemany values" mode + for psycopg2 were disabled for all "ON CONFLICT" styles of INSERT, to + not apply to the "ON CONFLICT DO NOTHING" clause, which does not include + any parameters and is safe for "executemany values" mode. "ON CONFLICT + DO UPDATE" is still blocked from "executemany values" as there may + be additional parameters in the DO UPDATE clause that cannot be batched + (which is the original issue fixed by :ticket:`6581`). .. 
changelog:: :version: 1.4.33 diff --git a/doc/build/changelog/unreleased_14/7878.rst b/doc/build/changelog/unreleased_14/7878.rst deleted file mode 100644 index 6c9e92929e8..00000000000 --- a/doc/build/changelog/unreleased_14/7878.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: bug, orm, regression - :tickets: 7878 - - Fixed regression caused by :ticket:`7861` where invoking an - :class:`.Insert` construct which contained ORM entities directly via - :meth:`_orm.Session.execute` would fail. diff --git a/doc/build/changelog/unreleased_14/7880.rst b/doc/build/changelog/unreleased_14/7880.rst deleted file mode 100644 index 9abbac1cfa8..00000000000 --- a/doc/build/changelog/unreleased_14/7880.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. change:: - :tags: bug, postgresql - :tickets: 7880 - - Scaled back a fix made for :ticket:`6581` where "executemany values" mode - for psycopg2 were disabled for all "ON CONFLICT" styles of INSERT, to - not apply to the "ON CONFLICT DO NOTHING" clause, which does not include - any parameters and is safe for "executemany values" mode. "ON CONFLICT - DO UPDATE" is still blocked from "executemany values" as there may - be additional parameters in the DO UPDATE clause that cannot be batched - (which is the original issue fixed by :ticket:`6581`). diff --git a/doc/build/conf.py b/doc/build/conf.py index 591478dacdf..aedb4608207 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -213,7 +213,7 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. 
-release = "1.4.33" +release = "1.4.34" release_date = "March 31, 2022" From 0386f976e9de2f416a04013d22575774056c504d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 31 Mar 2022 18:28:33 -0400 Subject: [PATCH 188/632] Version 1.4.35 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 80205285ac2..16740c95910 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.35 + :include_notes_from: unreleased_14 + .. changelog:: :version: 1.4.34 :released: March 31, 2022 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index 9610e0a66f7..456194d139b 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.34" +__version__ = "1.4.35" def __go(lcls): From 7d012e60b0cd6e5c114ec0c86b3ca2e04cf8a8d7 Mon Sep 17 00:00:00 2001 From: Maple Date: Sun, 3 Apr 2022 17:30:08 +0800 Subject: [PATCH 189/632] fix small indentation typo (#7882) --- doc/build/tutorial/metadata.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/build/tutorial/metadata.rst b/doc/build/tutorial/metadata.rst index 24284c4aaca..afaf9d6100e 100644 --- a/doc/build/tutorial/metadata.rst +++ b/doc/build/tutorial/metadata.rst @@ -446,18 +446,18 @@ than having the declarative process generate it:: class User(Base): __table__ = user_table - addresses = relationship("Address", back_populates="user") + addresses = relationship("Address", back_populates="user") - def __repr__(self): + def __repr__(self): return f"User({self.name!r}, {self.fullname!r})" class Address(Base): __table__ = address_table - user = relationship("User", 
back_populates="addresses") + user = relationship("User", back_populates="addresses") - def __repr__(self): - return f"Address({self.email_address!r})" + def __repr__(self): + return f"Address({self.email_address!r})" The above two classes are equivalent to those which we declared in the previous mapping example. From 9f0e8b03f2922423a34b0a711642741b47a9bb2e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 3 Apr 2022 10:39:19 -0400 Subject: [PATCH 190/632] clarify alternative mapping example this second example is not part of the doctest steps, clarify that it's not part of code examples to be present in execution steps. Add an extra registry + declarative base on top so that even if someone does run it, the Base will have been reset and the examples will continue to work (noting that column order in statements may change, but probably nothing else). Fixes: #7891 Change-Id: Icb1ba310230841e502185d9d0cadd3c18d467292 (cherry picked from commit 1dffb7cedeb009ca6c532db558bd0588dd846957) --- doc/build/tutorial/metadata.rst | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/doc/build/tutorial/metadata.rst b/doc/build/tutorial/metadata.rst index afaf9d6100e..6444ed692e0 100644 --- a/doc/build/tutorial/metadata.rst +++ b/doc/build/tutorial/metadata.rst @@ -443,6 +443,9 @@ This form is called :ref:`hybrid table `, and it consists of assigning to the ``.__table__`` attribute directly, rather than having the declarative process generate it:: + mapper_registry = registry() + Base = mapper_registry.generate_base() + class User(Base): __table__ = user_table @@ -459,6 +462,15 @@ than having the declarative process generate it:: def __repr__(self): return f"Address({self.email_address!r})" +.. note:: The above example is an **alternative form** to the mapping that's + first illustrated previously at :ref:`tutorial_declaring_mapped_classes`. 
+ This example is for illustrative purposes only, and is not part of this + tutorial's "doctest" steps, and as such does not need to be run for readers + who are executing code examples. The mapping here and the one at + :ref:`tutorial_declaring_mapped_classes` produce equivalent mappings, but in + general one would use only **one** of these two forms for particular mapped + class. + The above two classes are equivalent to those which we declared in the previous mapping example. From 270ba157a171777224ef63f8c9db7934d1053720 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 3 Apr 2022 11:28:57 -0400 Subject: [PATCH 191/632] TableValuedAlias generation fixes Fixed bug in newly implemented :paramref:`.FunctionElement.table_valued.joins_implicitly` feature where the parameter would not automatically propagate from the original :class:`.TableValuedAlias` object to the secondary object produced when calling upon :meth:`.TableValuedAlias.render_derived` or :meth:`.TableValuedAlias.alias`. Additionally repaired these issues in :class:`.TableValuedAlias`: * repaired a potential memory issue which could occur when repeatedly calling :meth:`.TableValuedAlias.render_derived` against successive copies of the same object (for .alias(), we currently have to still continue chaining from the previous element. not sure if this can be improved but this is standard behavior for .alias() elsewhere) * repaired issue where the individual element types would be lost when calling upon :meth:`.TableValuedAlias.render_derived` or :meth:`.TableValuedAlias.alias`. 
Fixes: #7890 Change-Id: Ie5120c7ff1e5c1bba5aaf77c782a51c637860208 (cherry picked from commit c315c7401a2aa00a8a0fa0f7d4189a9976fd7962) --- doc/build/changelog/unreleased_14/7890.rst | 22 ++++++++++++++++++ lib/sqlalchemy/sql/selectable.py | 19 ++++++++++++++-- test/aaa_profiling/test_memusage.py | 11 +++++++++ test/sql/test_from_linter.py | 22 ++++++++++++++---- test/sql/test_functions.py | 26 +++++++++++++++++++++- 5 files changed, 93 insertions(+), 7 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7890.rst diff --git a/doc/build/changelog/unreleased_14/7890.rst b/doc/build/changelog/unreleased_14/7890.rst new file mode 100644 index 00000000000..94a29abde5c --- /dev/null +++ b/doc/build/changelog/unreleased_14/7890.rst @@ -0,0 +1,22 @@ +.. change:: + :tags: bug, sql + :tickets: 7890 + + Fixed bug in newly implemented + :paramref:`.FunctionElement.table_valued.joins_implicitly` feature where + the parameter would not automatically propagate from the original + :class:`.TableValuedAlias` object to the secondary object produced when + calling upon :meth:`.TableValuedAlias.render_derived` or + :meth:`.TableValuedAlias.alias`. + + Additionally repaired these issues in :class:`.TableValuedAlias`: + + * repaired a potential memory issue which could occur when + repeatedly calling :meth:`.TableValuedAlias.render_derived` against + successive copies of the same object (for .alias(), we currently + have to still continue chaining from the previous element. not sure + if this can be improved but this is standard behavior for .alias() + elsewhere) + * repaired issue where the individual element types would be lost when + calling upon :meth:`.TableValuedAlias.render_derived` or + :meth:`.TableValuedAlias.alias`. 
diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index 028ed99a60c..ea81ce67058 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -1820,10 +1820,17 @@ def alias(self, name=None): """ - tva = TableValuedAlias._construct(self, name=name) + tva = TableValuedAlias._construct( + self, + name=name, + table_value_type=self._tableval_type, + joins_implicitly=self.joins_implicitly, + ) + if self._render_derived: tva._render_derived = True tva._render_derived_w_types = self._render_derived_w_types + return tva def lateral(self, name=None): @@ -1884,7 +1891,15 @@ def render_derived(self, name=None, with_types=False): # python id() of the original which can cause name conflicts if # a new anon-name grabs the same identifier as the local anon-name # (just saw it happen on CI) - new_alias = TableValuedAlias._construct(self, name=name) + + # construct against original to prevent memory growth + # for repeated generations + new_alias = TableValuedAlias._construct( + self.element, + name=name, + table_value_type=self._tableval_type, + joins_implicitly=self.joins_implicitly, + ) new_alias._render_derived = True new_alias._render_derived_w_types = with_types return new_alias diff --git a/test/aaa_profiling/test_memusage.py b/test/aaa_profiling/test_memusage.py index c842b593016..9abc3511a03 100644 --- a/test/aaa_profiling/test_memusage.py +++ b/test/aaa_profiling/test_memusage.py @@ -6,6 +6,7 @@ import sqlalchemy as sa from sqlalchemy import ForeignKey +from sqlalchemy import func from sqlalchemy import inspect from sqlalchemy import Integer from sqlalchemy import MetaData @@ -366,6 +367,16 @@ def go(): go() + def test_tv_render_derived(self): + root_expr = func.some_fn().table_valued() + expr = [root_expr] + + @profile_memory() + def go(): + expr[0] = expr[0].render_derived() + + go() + class MemUsageWBackendTest(fixtures.MappedTest, EnsureZeroed): diff --git a/test/sql/test_from_linter.py 
b/test/sql/test_from_linter.py index 4a4d907f965..1fa3aff360f 100644 --- a/test/sql/test_from_linter.py +++ b/test/sql/test_from_linter.py @@ -165,8 +165,15 @@ def test_lateral_subqueries_ok_do_we_still_find_cartesians(self): assert start is p3 assert froms == {p1} + @testing.combinations( + "render_derived", "alias", None, argnames="additional_transformation" + ) @testing.combinations(True, False, argnames="joins_implicitly") - def test_table_valued(self, joins_implicitly): + def test_table_valued( + self, + joins_implicitly, + additional_transformation, + ): """test #7845""" my_table = table( "tbl", @@ -175,9 +182,16 @@ def test_table_valued(self, joins_implicitly): ) sub_dict = my_table.c.data["d"] - tv = func.json_each(sub_dict).table_valued( - "key", joins_implicitly=joins_implicitly - ) + + tv = func.json_each(sub_dict) + + tv = tv.table_valued("key", joins_implicitly=joins_implicitly) + + if additional_transformation == "render_derived": + tv = tv.render_derived(name="tv", with_types=True) + elif additional_transformation == "alias": + tv = tv.alias() + has_key = tv.c.key == "f" stmt = select(my_table.c.id).where(has_key) froms, start = find_unmatching_froms(stmt, my_table) diff --git a/test/sql/test_functions.py b/test/sql/test_functions.py index 27f1b897420..c4326b8abc8 100644 --- a/test/sql/test_functions.py +++ b/test/sql/test_functions.py @@ -25,7 +25,6 @@ from sqlalchemy import testing from sqlalchemy import Text from sqlalchemy import true -from sqlalchemy import types as sqltypes from sqlalchemy import util from sqlalchemy.dialects import mysql from sqlalchemy.dialects import oracle @@ -37,6 +36,7 @@ from sqlalchemy.sql import LABEL_STYLE_TABLENAME_PLUS_COL from sqlalchemy.sql import operators from sqlalchemy.sql import quoted_name +from sqlalchemy.sql import sqltypes from sqlalchemy.sql import table from sqlalchemy.sql.compiler import BIND_TEMPLATES from sqlalchemy.sql.functions import FunctionElement @@ -1430,6 +1430,30 @@ def 
test_named_with_ordinality(self): "LEFT OUTER JOIN b ON unnested.unnested = b.ref", ) + def test_render_derived_maintains_tableval_type(self): + fn = func.json_something() + + tv = fn.table_valued(column("x", String)) + + eq_(tv.column.type, testing.eq_type_affinity(sqltypes.TableValueType)) + eq_(tv.column.type._elements[0].type, testing.eq_type_affinity(String)) + + tv = tv.render_derived() + eq_(tv.column.type, testing.eq_type_affinity(sqltypes.TableValueType)) + eq_(tv.column.type._elements[0].type, testing.eq_type_affinity(String)) + + def test_alias_maintains_tableval_type(self): + fn = func.json_something() + + tv = fn.table_valued(column("x", String)) + + eq_(tv.column.type, testing.eq_type_affinity(sqltypes.TableValueType)) + eq_(tv.column.type._elements[0].type, testing.eq_type_affinity(String)) + + tv = tv.alias() + eq_(tv.column.type, testing.eq_type_affinity(sqltypes.TableValueType)) + eq_(tv.column.type._elements[0].type, testing.eq_type_affinity(String)) + def test_star_with_ordinality(self): """ SELECT * FROM generate_series(4,1,-1) WITH ORDINALITY; From 77654cb534de126471263f8876b6be6794da8506 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 5 Apr 2022 14:39:32 -0400 Subject: [PATCH 192/632] fix typo in lambda example we might have an actual customer so clean up shop Change-Id: I0b1e36ad78f364805a3a7bfd6fac953cf94b838f (cherry picked from commit 7935b76d9e5b5fd4e64b2c6c3473737186acf2db) --- doc/build/core/connections.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/build/core/connections.rst b/doc/build/core/connections.rst index 3c2875bb4c1..c683c7ee9df 100644 --- a/doc/build/core/connections.rst +++ b/doc/build/core/connections.rst @@ -1593,9 +1593,9 @@ Basic guidelines include: def my_stmt(parameter, thing=False): stmt = lambda_stmt(lambda: select(table)) if thing: - stmt += s.where(table.c.x > parameter) + stmt += lambda s: s.where(table.c.x > parameter) else: - stmt += s.where(table.c.y == parameter) + stmt += 
lambda s: s.where(table.c.y == parameter) return stmt There are a variety of failures which can occur if the lambda does not From d1f97828fd45ad65c46c6cb8eb8e4091ad027274 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 6 Apr 2022 09:41:11 -0400 Subject: [PATCH 193/632] maintain complete cloned_set for BindParameter Fixed regression caused by :ticket:`7823` which impacted the caching system, such that bound parameters that had been "cloned" within ORM operations, such as polymorphic loading, would in some cases not acquire their correct execution-time value leading to incorrect bind values being rendered. Fixes: #7903 Change-Id: I61c802749b859bebeb127d24e66d6e77d13ce57a (cherry picked from commit 2168a64affb2e299b9a37079af7b2a8d4ae0ff64) --- doc/build/changelog/unreleased_14/7903.rst | 9 ++++ lib/sqlalchemy/sql/elements.py | 10 ++++- lib/sqlalchemy/testing/fixtures.py | 7 +++ test/aaa_profiling/test_memusage.py | 10 ++++- test/orm/inheritance/test_polymorphic_rel.py | 46 ++++++++++++++++++-- test/sql/test_compiler.py | 17 ++++++-- 6 files changed, 90 insertions(+), 9 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7903.rst diff --git a/doc/build/changelog/unreleased_14/7903.rst b/doc/build/changelog/unreleased_14/7903.rst new file mode 100644 index 00000000000..c2a4e007875 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7903.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, regression, caching + :tickets: 7903 + + Fixed regression caused by :ticket:`7823` which impacted the caching + system, such that bound parameters that had been "cloned" within ORM + operations, such as polymorphic loading, would in some cases not acquire + their correct execution-time value leading to incorrect bind values being + rendered. 
diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index 81645ad0a0a..da9c5f6b569 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -250,7 +250,6 @@ def _clone(self, **kw): # old table. cc = self._is_clone_of c._is_clone_of = cc if cc is not None else self - return c def _negate_in_binary(self, negated_op, original_op): @@ -1633,6 +1632,15 @@ def _with_binary_element_type(self, type_): def _clone(self, maintain_key=False, **kw): c = ClauseElement._clone(self, **kw) + # ensure all the BindParameter objects stay in cloned set. + # in #7823, we changed "clone" so that a clone only keeps a reference + # to the "original" element, since for column correspondence, that's + # all we need. However, for BindParam, _cloned_set is used by + # the "cache key bind match" lookup, which means if any of those + # interim BindParameter objects became part of a cache key in the + # cache, we need it. So here, make sure all clones keep carrying + # forward. 
+ c._cloned_set.update(self._cloned_set) if not maintain_key and self.unique: c.key = _anonymous_label.safe_construct( id(c), c._orig_key or "param", sanitize_key=True diff --git a/lib/sqlalchemy/testing/fixtures.py b/lib/sqlalchemy/testing/fixtures.py index ff5c3dd101f..f5bdd44922e 100644 --- a/lib/sqlalchemy/testing/fixtures.py +++ b/lib/sqlalchemy/testing/fixtures.py @@ -50,6 +50,13 @@ class TestBase(object): def assert_(self, val, msg=None): assert val, msg + @config.fixture() + def nocache(self): + _cache = config.db._compiled_cache + config.db._compiled_cache = None + yield + config.db._compiled_cache = _cache + @config.fixture() def connection_no_trans(self): eng = getattr(self, "bind", None) or config.db diff --git a/test/aaa_profiling/test_memusage.py b/test/aaa_profiling/test_memusage.py index 9abc3511a03..bd727a842ac 100644 --- a/test/aaa_profiling/test_memusage.py +++ b/test/aaa_profiling/test_memusage.py @@ -357,8 +357,14 @@ def go(): go() def test_clone_expression(self): - - root_expr = column("x", Integer) == 12 + # this test is for the memory issue "fixed" in #7823, where clones + # no longer carry along all past elements. + # However, due to #7903, we can't at the moment use a + # BindParameter here - these have to continue to carry along all + # the previous clones for now. So the test here only works with + # expressions that dont have BindParameter objects in them. 
+ + root_expr = column("x", Integer) == column("y", Integer) expr = [root_expr] @profile_memory() diff --git a/test/orm/inheritance/test_polymorphic_rel.py b/test/orm/inheritance/test_polymorphic_rel.py index 60235bd86cd..aa8d9eaec68 100644 --- a/test/orm/inheritance/test_polymorphic_rel.py +++ b/test/orm/inheritance/test_polymorphic_rel.py @@ -10,10 +10,10 @@ from sqlalchemy.orm import joinedload from sqlalchemy.orm import selectinload from sqlalchemy.orm import subqueryload +from sqlalchemy.orm import with_parent from sqlalchemy.orm import with_polymorphic from sqlalchemy.testing import assert_raises from sqlalchemy.testing import eq_ -from sqlalchemy.testing import fixtures from sqlalchemy.testing.assertsql import CompiledSQL from sqlalchemy.testing.fixtures import fixture_session from ._poly_fixtures import _Polymorphic @@ -30,7 +30,7 @@ from ._poly_fixtures import Person -class _PolymorphicTestBase(fixtures.NoCache): +class _PolymorphicTestBase(object): __backend__ = True __dialect__ = "default_enhanced" @@ -195,6 +195,34 @@ def test_get_three(self): Boss(name="pointy haired boss", golf_swing="fore"), ) + def test_lazyload_related_w_cache_check(self): + sess = fixture_session() + + c1 = sess.get(Company, 1) + c2 = sess.get(Company, 2) + + q1 = ( + sess.query(Person) + .filter(with_parent(c1, Company.employees)) + .order_by(Person.person_id) + ) + eq_( + q1.all(), + [ + Engineer(name="dilbert"), + Engineer(name="wally"), + Boss(name="pointy haired boss"), + Manager(name="dogbert"), + ], + ) + + q2 = ( + sess.query(Person) + .filter(with_parent(c2, Company.employees)) + .order_by(Person.person_id) + ) + eq_(q2.all(), [Engineer(name="vlad")]) + def test_multi_join(self): sess = fixture_session() e = aliased(Person) @@ -881,7 +909,7 @@ def go(): self.assert_sql_count(testing.db, go, 1) - def test_with_polymorphic_three_future(self): + def test_with_polymorphic_three_future(self, nocache): sess = fixture_session() def go(): @@ -2284,6 +2312,12 @@ def 
test_flat_aliased_w_select_from(self): class PolymorphicUnionsTest(_PolymorphicTestBase, _PolymorphicUnions): + @testing.skip_if( + lambda: True, "join condition doesn't work w/ this mapping" + ) + def test_lazyload_related_w_cache_check(self): + pass + def test_with_polymorphic_two_future_default_wp(self): """test #7262 @@ -2385,6 +2419,12 @@ def test_subqueryload_on_subclass_uses_path_correctly(self): class PolymorphicAliasedJoinsTest( _PolymorphicTestBase, _PolymorphicAliasedJoins ): + @testing.skip_if( + lambda: True, "join condition doesn't work w/ this mapping" + ) + def test_lazyload_related_w_cache_check(self): + pass + def test_with_polymorphic_two_future_default_wp(self): """test #7262 diff --git a/test/sql/test_compiler.py b/test/sql/test_compiler.py index 50fd582b7d6..f5f17a35014 100644 --- a/test/sql/test_compiler.py +++ b/test/sql/test_compiler.py @@ -3938,7 +3938,8 @@ def test_construct_params_combine_extracted( extracted_parameters=s1_cache_key[1], ) - def test_construct_params_w_bind_clones_post(self): + @testing.combinations(True, False, argnames="adapt_before_key") + def test_construct_params_w_bind_clones_post(self, adapt_before_key): """test that a BindParameter that has been cloned after the cache key was generated still matches up when construct_params() is called with an extracted parameter collection. 
@@ -3962,6 +3963,11 @@ def test_construct_params_w_bind_clones_post(self): # it's anonymous so unique=True is_true(original_bind.unique) + # test #7903 - adapt the statement *before* we make the cache + # key also + if adapt_before_key: + stmt = sql_util.ClauseAdapter(table1).traverse(stmt) + # cache key against the original param cache_key = stmt._generate_cache_key() @@ -4014,7 +4020,8 @@ def test_construct_params_w_bind_clones_post(self): {"myid_1": 10}, ) - def test_construct_duped_params_w_bind_clones_post(self): + @testing.combinations(True, False, argnames="adapt_before_key") + def test_construct_duped_params_w_bind_clones_post(self, adapt_before_key): """same as previous test_construct_params_w_bind_clones_post but where the binds have been used repeatedly, and the adaption occurs on a per-subquery basis. @@ -4037,6 +4044,10 @@ def test_construct_duped_params_w_bind_clones_post(self): # it's anonymous so unique=True is_true(original_bind.unique) + # variant that exercises #7903 + if adapt_before_key: + stmt = sql_util.ClauseAdapter(table1).traverse(stmt) + # cache key against the original param cache_key = stmt._generate_cache_key() @@ -4118,7 +4129,7 @@ def test_construct_params_w_bind_clones_pre(self): be unique, still matches up when construct_params() is called with an extracted parameter collection. - other ORM feaures like optimized_compare() end up doing something + other ORM features like optimized_compare() end up doing something like this, such as if there are multiple "has()" or "any()" which would have cloned the join condition and changed the values of bound parameters. 
From 5414aa1a2015fe8c91920b333f3737161543209b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 6 Apr 2022 11:17:05 -0400 Subject: [PATCH 194/632] classify issue Change-Id: Id1e59fee382d1d72d78e99b1c56a5837b5503fed (cherry picked from commit 31ccec00da7e0e81781a523a9d1acbd926be6553) --- doc/build/changelog/unreleased_14/7903.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/changelog/unreleased_14/7903.rst b/doc/build/changelog/unreleased_14/7903.rst index c2a4e007875..816504cfe30 100644 --- a/doc/build/changelog/unreleased_14/7903.rst +++ b/doc/build/changelog/unreleased_14/7903.rst @@ -1,5 +1,5 @@ .. change:: - :tags: bug, regression, caching + :tags: bug, sql, regression :tickets: 7903 Fixed regression caused by :ticket:`7823` which impacted the caching From 615c5809b97f8122a5a6e407801bbe222912827d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 6 Apr 2022 11:23:05 -0400 Subject: [PATCH 195/632] - 1.4.35 --- doc/build/changelog/changelog_14.rst | 35 +++++++++++++++++++++- doc/build/changelog/unreleased_14/7890.rst | 22 -------------- doc/build/changelog/unreleased_14/7903.rst | 9 ------ doc/build/conf.py | 4 +-- 4 files changed, 36 insertions(+), 34 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/7890.rst delete mode 100644 doc/build/changelog/unreleased_14/7903.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 16740c95910..ed8b372651b 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,40 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.35 - :include_notes_from: unreleased_14 + :released: April 6, 2022 + + .. 
change:: + :tags: bug, sql + :tickets: 7890 + + Fixed bug in newly implemented + :paramref:`.FunctionElement.table_valued.joins_implicitly` feature where + the parameter would not automatically propagate from the original + :class:`.TableValuedAlias` object to the secondary object produced when + calling upon :meth:`.TableValuedAlias.render_derived` or + :meth:`.TableValuedAlias.alias`. + + Additionally repaired these issues in :class:`.TableValuedAlias`: + + * repaired a potential memory issue which could occur when + repeatedly calling :meth:`.TableValuedAlias.render_derived` against + successive copies of the same object (for .alias(), we currently + have to still continue chaining from the previous element. not sure + if this can be improved but this is standard behavior for .alias() + elsewhere) + * repaired issue where the individual element types would be lost when + calling upon :meth:`.TableValuedAlias.render_derived` or + :meth:`.TableValuedAlias.alias`. + + .. change:: + :tags: bug, sql, regression + :tickets: 7903 + + Fixed regression caused by :ticket:`7823` which impacted the caching + system, such that bound parameters that had been "cloned" within ORM + operations, such as polymorphic loading, would in some cases not acquire + their correct execution-time value leading to incorrect bind values being + rendered. .. changelog:: :version: 1.4.34 diff --git a/doc/build/changelog/unreleased_14/7890.rst b/doc/build/changelog/unreleased_14/7890.rst deleted file mode 100644 index 94a29abde5c..00000000000 --- a/doc/build/changelog/unreleased_14/7890.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. 
change:: - :tags: bug, sql - :tickets: 7890 - - Fixed bug in newly implemented - :paramref:`.FunctionElement.table_valued.joins_implicitly` feature where - the parameter would not automatically propagate from the original - :class:`.TableValuedAlias` object to the secondary object produced when - calling upon :meth:`.TableValuedAlias.render_derived` or - :meth:`.TableValuedAlias.alias`. - - Additionally repaired these issues in :class:`.TableValuedAlias`: - - * repaired a potential memory issue which could occur when - repeatedly calling :meth:`.TableValuedAlias.render_derived` against - successive copies of the same object (for .alias(), we currently - have to still continue chaining from the previous element. not sure - if this can be improved but this is standard behavior for .alias() - elsewhere) - * repaired issue where the individual element types would be lost when - calling upon :meth:`.TableValuedAlias.render_derived` or - :meth:`.TableValuedAlias.alias`. diff --git a/doc/build/changelog/unreleased_14/7903.rst b/doc/build/changelog/unreleased_14/7903.rst deleted file mode 100644 index 816504cfe30..00000000000 --- a/doc/build/changelog/unreleased_14/7903.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, sql, regression - :tickets: 7903 - - Fixed regression caused by :ticket:`7823` which impacted the caching - system, such that bound parameters that had been "cloned" within ORM - operations, such as polymorphic loading, would in some cases not acquire - their correct execution-time value leading to incorrect bind values being - rendered. diff --git a/doc/build/conf.py b/doc/build/conf.py index aedb4608207..fd10f6a0aaa 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -213,9 +213,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. 
-release = "1.4.34" +release = "1.4.35" -release_date = "March 31, 2022" +release_date = "April 6, 2022" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From c00d653a4bff82ec4765d2bfba4ed19f1e201ae2 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 6 Apr 2022 11:34:25 -0400 Subject: [PATCH 196/632] Version 1.4.36 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index ed8b372651b..79426351e94 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.36 + :include_notes_from: unreleased_14 + .. changelog:: :version: 1.4.35 :released: April 6, 2022 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index 456194d139b..9c6fddf0a8d 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.35" +__version__ = "1.4.36" def __go(lcls): From 4d27269d46665900fd0ed2233f5147a692ca2533 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 8 Apr 2022 09:28:47 -0400 Subject: [PATCH 197/632] explicitly refer to Apple M1 for greenlet issue As developers are now buying lots of Apple M1 machines, and AFAWK greenlet is still not able to provide a pre-built wheel, we are going to get a lot of devs trying to use asyncio on their Apple M1s, in greater proportions compared to devs running containers etc. on other various less popular CPU architectures. Add a new FAQ section for installation, add new red dragon to the very top of asyncio docs, add new verbiage, all of which includes "Apple M1" in bold text, to minimize the chance of anyone missing this. 
Fixes: #7922 Fixes: #7714 Change-Id: I106923a2860a4efd77d1b999197be102afc1f73d (cherry picked from commit f7ebfa6072d65c32c61194b265662c957d3f09dd) --- doc/build/faq/index.rst | 1 + doc/build/faq/installation.rst | 29 ++++++++++++++++++++++++++++ doc/build/orm/extensions/asyncio.rst | 28 ++++++++++++++++++++------- 3 files changed, 51 insertions(+), 7 deletions(-) create mode 100644 doc/build/faq/installation.rst diff --git a/doc/build/faq/index.rst b/doc/build/faq/index.rst index 810a0401157..4b2397d5b8d 100644 --- a/doc/build/faq/index.rst +++ b/doc/build/faq/index.rst @@ -10,6 +10,7 @@ observed questions to well-known issues. .. toctree:: :maxdepth: 2 + installation connections metadata_schema sqlexpressions diff --git a/doc/build/faq/installation.rst b/doc/build/faq/installation.rst new file mode 100644 index 00000000000..500198df8a4 --- /dev/null +++ b/doc/build/faq/installation.rst @@ -0,0 +1,29 @@ +Installation +================= + +.. contents:: + :local: + :class: faq + :backlinks: none + +.. _faq_asyncio_installation: + +I'm getting an error about greenlet not being installed when I try to use asyncio +---------------------------------------------------------------------------------- + +The ``greenlet`` dependency does not install by default for CPU architectures +for which ``greenlet`` does not supply a `pre-built binary wheel `_. +Notably, **this includes Apple M1**. To install including ``greenlet``, +add the ``asyncio`` `setuptools extra `_ +to the ``pip install`` command:: + + pip install sqlalchemy[asyncio] + +For more background, see :ref:`asyncio_install`. + + +.. seealso:: + + :ref:`asyncio_install` + + diff --git a/doc/build/orm/extensions/asyncio.rst b/doc/build/orm/extensions/asyncio.rst index 679d8f0564b..8516ad851ad 100644 --- a/doc/build/orm/extensions/asyncio.rst +++ b/doc/build/orm/extensions/asyncio.rst @@ -8,7 +8,10 @@ included, using asyncio-compatible dialects. .. versionadded:: 1.4 -.. 
note:: The asyncio extension as of SQLAlchemy 1.4.3 can now be considered to +.. warning:: Please read :ref:`asyncio_install` for important platform + installation notes for many platforms, including **Apple M1 Architecture**. + +.. tip:: The asyncio extension as of SQLAlchemy 1.4.3 can now be considered to be **beta level** software. API details are subject to change however at this point it is unlikely for there to be significant backwards-incompatible changes. @@ -22,22 +25,33 @@ included, using asyncio-compatible dialects. .. _asyncio_install: -Asyncio Platform Installation Notes ------------------------------------- +Asyncio Platform Installation Notes (Including Apple M1) +--------------------------------------------------------- -The asyncio extension requires at least Python version 3.6. It also depends +The asyncio extension requires Python 3 only. It also depends upon the `greenlet `_ library. This dependency is installed by default on common machine platforms including:: x86_64 aarch64 ppc64le amd64 win32 For the above platforms, ``greenlet`` is known to supply pre-built wheel files. -To ensure the ``greenlet`` dependency is present on other platforms, the -``[asyncio]`` extra may be installed as follows, which will include an attempt -to build and install ``greenlet``:: +For other platforms, **greenlet does not install by default**; +the current file listing for greenlet can be seen at +`Greenlet - Download Files `_. +Note that **there are many architectures omitted, including Apple M1**. 
+ +To install SQLAlchemy while ensuring the ``greenlet`` dependency is present +regardless of what platform is in use, the +``[asyncio]`` `setuptools extra `_ +may be installed +as follows, which will include also instruct ``pip`` to install ``greenlet``:: pip install sqlalchemy[asyncio] +Note that installation of ``greenlet`` on platforms that do not have a pre-built +wheel file means that ``greenlet`` will be built from source, which requires +that Python's development libraries also be present. + Synopsis - Core --------------- From ad4a0458a75d49658f43ba2acbb1826b59a8025e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 8 Apr 2022 10:36:50 -0400 Subject: [PATCH 198/632] clarify autoflush setting does not apply to commit Change-Id: Icad0f3bd071422b8d1af204c9a1193a9ce6124ba References: #7916 (cherry picked from commit ed2b29dc344c9cb65745c767b755f82d913695b8) --- doc/build/orm/session_basics.rst | 58 ++++++++++++++++++++------------ lib/sqlalchemy/orm/session.py | 4 +++ 2 files changed, 40 insertions(+), 22 deletions(-) diff --git a/doc/build/orm/session_basics.rst b/doc/build/orm/session_basics.rst index d60db4d73c7..8d3ebb735f2 100644 --- a/doc/build/orm/session_basics.rst +++ b/doc/build/orm/session_basics.rst @@ -424,39 +424,53 @@ a :term:`2.0-style` :meth:`_orm.Session.execute` call, as well as within the committed. It also occurs before a SAVEPOINT is issued when :meth:`~.Session.begin_nested` is used. 
-Regardless of the autoflush setting, a flush can always be forced by issuing -:meth:`~.Session.flush`:: +A :class:`.Session` flush can be forced at any time by calling the +:meth:`~.Session.flush` method:: session.flush() -The "flush-on-Query" aspect of the behavior can be disabled by constructing -:class:`.sessionmaker` with the flag ``autoflush=False``:: +The flush which occurs automatically within the scope of +:meth:`_orm.Session.execute`, :class:`_query.Query`, as well as other +:class:`.Session` methods such as :meth:`.Session.merge` (but **not** including +the :meth:`.Session.commit` method) is known as **autoflush**. This "autoflush" +behavior can be disabled by constructing a :class:`.Session` or +:class:`.sessionmaker` passing the :paramref:`.Session.autoflush` parameter as +``False``:: Session = sessionmaker(autoflush=False) -Additionally, autoflush can be temporarily disabled by setting the -``autoflush`` flag at any time:: - - mysession = Session() - mysession.autoflush = False - -More conveniently, it can be turned off within a context managed block using :attr:`.Session.no_autoflush`:: +Additionally, autoflush can be temporarily disabled within the flow +of using a :class:`.Session` using the +:attr:`.Session.no_autoflush` context manager:: with mysession.no_autoflush: mysession.add(some_object) mysession.flush() -The flush process *always* occurs within a transaction, even if the -:class:`~sqlalchemy.orm.session.Session` has been configured with -``autocommit=True``, a setting that disables the session's persistent -transactional state. If no transaction is present, -:meth:`~.Session.flush` creates its own transaction and -commits it. Any failures during flush will always result in a rollback of -whatever transaction is present. 
If the Session is not in ``autocommit=True`` -mode, an explicit call to :meth:`~.Session.rollback` is -required after a flush fails, even though the underlying transaction will have -been rolled back already - this is so that the overall nesting pattern of -so-called "subtransactions" is consistently maintained. +The flush process **always occurs** when the :meth:`.Session.commit` method is +called, regardless of any "autoflush" settings, when the :class:`.Session` has +remaining pending changes to process. + +The flush process *always* occurs within a transaction, (subject to the +:ref:`isolation level ` of the database +transaction), provided that the DBAPI is not in +:ref:`driver level autocommit ` mode. This includes even if +the :class:`~sqlalchemy.orm.session.Session` has been configured with the +deprecated :paramref:`_orm.Session.autocommit` setting, which disables the +session's persistent transactional state. If no transaction is present, +:meth:`~.Session.flush` creates its own transaction and commits it. This means +that assuming the database connection is providing for :term:`atomicity` within +its transactional settings, if any individual DML statement inside the flush +fails, the entire operation will be rolled back. + +Outside of using :paramref:`_orm.Session.autocommit`, when a failure occurs +within a flush, in order to continue using that same :class:`_orm.Session`, an +explicit call to :meth:`~.Session.rollback` is required after a flush fails, +even though the underlying transaction will have been rolled back already (even +if the database driver is technically in driver-level autocommit mode). This is +so that the overall nesting pattern of so-called "subtransactions" is +consistently maintained. The FAQ section :ref:`faq_session_rollback` contains a +more detailed description of this behavior. .. 
_session_expiring: diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index d5a80953d6e..315a1254fdc 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -1003,6 +1003,10 @@ def __init__( :meth:`~.Session.flush` are rarely needed; you usually only need to call :meth:`~.Session.commit` (which flushes) to finalize changes. + .. seealso:: + + :ref:`session_flushing` - additional background on autoflush + :param bind: An optional :class:`_engine.Engine` or :class:`_engine.Connection` to which this ``Session`` should be bound. When specified, all SQL From 95c9b32a334c9f8337989a67394a6ccf004341e1 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 8 Apr 2022 10:56:54 -0400 Subject: [PATCH 199/632] i forgot about begin_nested() make bulleted lists of what is autoflush and what's not. Change-Id: Id3bc4714013e9df243d804d7b5b60c6ef75e1316 (cherry picked from commit 4acf50c8e86b6b047853b2bc96ccaa494811085f) --- doc/build/orm/session_basics.rst | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/doc/build/orm/session_basics.rst b/doc/build/orm/session_basics.rst index 8d3ebb735f2..dd2a868daa5 100644 --- a/doc/build/orm/session_basics.rst +++ b/doc/build/orm/session_basics.rst @@ -429,11 +429,26 @@ A :class:`.Session` flush can be forced at any time by calling the session.flush() -The flush which occurs automatically within the scope of -:meth:`_orm.Session.execute`, :class:`_query.Query`, as well as other -:class:`.Session` methods such as :meth:`.Session.merge` (but **not** including -the :meth:`.Session.commit` method) is known as **autoflush**. This "autoflush" -behavior can be disabled by constructing a :class:`.Session` or +The flush which occurs automatically within the scope of certain methods +is known as **autoflush**. 
Autoflush is defined as a configurable, +automatic flush call which occurs at the beginning of methods including: + +* :meth:`_orm.Session.execute` and other SQL-executing methods +* When a :class:`_query.Query` is invoked to send SQL to the database +* Within the :meth:`.Session.merge` method before querying the database +* When objects are :ref:`refreshed ` +* When ORM :term:`lazy load` operations occur against unloaded object + attributes. + +There are also points at which flushes occur **unconditionally**; these +points are within key transactional boundaries which include: + +* Within the process of the :meth:`.Session.commit` method +* When :meth:`.Session.begin_nested` is called +* When the :meth:`.Session.prepare` 2PC method is used. + +The **autoflush** behavior, as applied to the previous list of items, +can be disabled by constructing a :class:`.Session` or :class:`.sessionmaker` passing the :paramref:`.Session.autoflush` parameter as ``False``:: @@ -447,7 +462,8 @@ of using a :class:`.Session` using the mysession.add(some_object) mysession.flush() -The flush process **always occurs** when the :meth:`.Session.commit` method is +**To reiterate:** The flush process **always occurs** when transactional +methods such as :meth:`.Session.commit` and :meth:`.Session.begin_nested` are called, regardless of any "autoflush" settings, when the :class:`.Session` has remaining pending changes to process. From 5055f5f5a07948b701c635db9b3e5c98a0cdf4de Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 9 Apr 2022 09:50:18 -0400 Subject: [PATCH 200/632] add sane_rowcount to SimpleUpdateDeleteTest For third party dialects, repaired a missing requirement for the ``SimpleUpdateDeleteTest`` suite test which was not checking for a working "rowcount" function on the target dialect. 
Fixes: #7919 Change-Id: I2bc68132131eb36c43b8dabec0fac86272e26df5 (cherry picked from commit f3a65eb9033397fbf746fbf71df19ca9d1fce2f4) --- doc/build/changelog/unreleased_14/7919.rst | 8 ++++++++ lib/sqlalchemy/testing/suite/test_update_delete.py | 1 + 2 files changed, 9 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7919.rst diff --git a/doc/build/changelog/unreleased_14/7919.rst b/doc/build/changelog/unreleased_14/7919.rst new file mode 100644 index 00000000000..fdba724e8a0 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7919.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, tests + :tickets: 7919 + + For third party dialects, repaired a missing requirement for the + ``SimpleUpdateDeleteTest`` suite test which was not checking for a working + "rowcount" function on the target dialect. + diff --git a/lib/sqlalchemy/testing/suite/test_update_delete.py b/lib/sqlalchemy/testing/suite/test_update_delete.py index f5ee2e02815..f04a9d57ef0 100644 --- a/lib/sqlalchemy/testing/suite/test_update_delete.py +++ b/lib/sqlalchemy/testing/suite/test_update_delete.py @@ -8,6 +8,7 @@ class SimpleUpdateDeleteTest(fixtures.TablesTest): run_deletes = "each" + __requires__ = ("sane_rowcount",) __backend__ = True @classmethod From 5e19643ea9db523742d5b9e00384e3d3c2ff9e8c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 9 Apr 2022 10:02:35 -0400 Subject: [PATCH 201/632] update DDL examples removing execute_if Fixes: #7924 Change-Id: I684bf3720010ffe34dbdd39ec8c7e0c4af94e620 (cherry picked from commit 1eeded1687aef3132b2a1d5995321fb20b2b8dfc) --- lib/sqlalchemy/ext/compiler.py | 19 +++++++++++++------ lib/sqlalchemy/sql/ddl.py | 6 ++++-- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/lib/sqlalchemy/ext/compiler.py b/lib/sqlalchemy/ext/compiler.py index b97b23e660e..76b59ea6e37 100644 --- a/lib/sqlalchemy/ext/compiler.py +++ b/lib/sqlalchemy/ext/compiler.py @@ -282,12 +282,19 @@ def compile(element, compiler, **kw): raise TypeError("coalesce only 
supports two arguments on Oracle") return "nvl(%s)" % compiler.process(element.clauses, **kw) -* :class:`~sqlalchemy.schema.DDLElement` - The root of all DDL expressions, - like CREATE TABLE, ALTER TABLE, etc. Compilation of ``DDLElement`` - subclasses is issued by a ``DDLCompiler`` instead of a ``SQLCompiler``. - ``DDLElement`` also features ``Table`` and ``MetaData`` event hooks via the - ``execute_at()`` method, allowing the construct to be invoked during CREATE - TABLE and DROP TABLE sequences. +* :class:`.DDLElement` - The root of all DDL expressions, + like CREATE TABLE, ALTER TABLE, etc. Compilation of :class:`.DDLElement` + subclasses is issued by a :class:`.DDLCompiler` instead of a + :class:`.SQLCompiler`. :class:`.DDLElement` can also be used as an event hook + in conjunction with event hooks like :meth:`.DDLEvents.before_create` and + :meth:`.DDLEvents.after_create`, allowing the construct to be invoked + automatically during CREATE TABLE and DROP TABLE sequences. + + .. seealso:: + + :ref:`metadata_ddl_toplevel` - contains examples of associating + :class:`.DDL` objects (which are themselves :class:`.DDLElement` + instances) with :class:`.DDLEvents` event hooks. * :class:`~sqlalchemy.sql.expression.Executable` - This is a mixin which should be used with any expression class that represents a "standalone" diff --git a/lib/sqlalchemy/sql/ddl.py b/lib/sqlalchemy/sql/ddl.py index bf44bfdb145..e608052f35f 100644 --- a/lib/sqlalchemy/sql/ddl.py +++ b/lib/sqlalchemy/sql/ddl.py @@ -312,8 +312,10 @@ def __init__(self, statement, context=None, bind=None): :param statement: A string or unicode string to be executed. Statements will be - processed with Python's string formatting operator. See the - ``context`` argument and the ``execute_at`` method. + processed with Python's string formatting operator using + a fixed set of string substitutions, as well as additional + substitutions provided by the optional :paramref:`.DDL.context` + parameter. 
A literal '%' in a statement must be escaped as '%%'. From 7097abc3918c4e2234ae07ab14b91bef4fc46644 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Mon, 11 Apr 2022 22:21:20 +0200 Subject: [PATCH 202/632] update flake8 noqa skips with proper syntax Change-Id: I42ed77f559e3ee5b8c600d98457ee37803ef0ea6 (cherry picked from commit 139c6ec0fb1f930be9b64545262d2580f6cbc83e) --- lib/sqlalchemy/dialects/oracle/provision.py | 2 +- lib/sqlalchemy/dialects/postgresql/base.py | 2 +- lib/sqlalchemy/engine/cursor.py | 2 +- lib/sqlalchemy/ext/asyncio/scoping.py | 2 +- lib/sqlalchemy/ext/asyncio/session.py | 2 +- lib/sqlalchemy/orm/decl_api.py | 2 +- lib/sqlalchemy/orm/loading.py | 2 +- lib/sqlalchemy/sql/dml.py | 4 +-- lib/sqlalchemy/sql/functions.py | 14 +++++----- lib/sqlalchemy/sql/schema.py | 2 +- lib/sqlalchemy/sql/selectable.py | 12 ++++----- lib/sqlalchemy/testing/requirements.py | 2 +- lib/sqlalchemy/util/concurrency.py | 26 +++++++++---------- lib/sqlalchemy/util/langhelpers.py | 8 +++--- test/aaa_profiling/test_orm.py | 18 ++++++------- test/engine/test_execute.py | 8 +++--- .../ensure_descriptor_type_fully_inferred.py | 2 +- .../ensure_descriptor_type_noninferred.py | 2 +- .../ensure_descriptor_type_semiinferred.py | 2 +- .../mypy/files/invalid_noninferred_lh_type.py | 2 +- .../mypy/files/typeless_fk_col_cant_infer.py | 2 +- test/ext/mypy/test_mypy_plugin_py3k.py | 2 +- test/orm/test_deprecations.py | 6 ++--- test/sql/test_functions.py | 2 +- 24 files changed, 64 insertions(+), 64 deletions(-) diff --git a/lib/sqlalchemy/dialects/oracle/provision.py b/lib/sqlalchemy/dialects/oracle/provision.py index 8ce58782be0..74ad1f2a4b1 100644 --- a/lib/sqlalchemy/dialects/oracle/provision.py +++ b/lib/sqlalchemy/dialects/oracle/provision.py @@ -63,7 +63,7 @@ def stop_test_class_outside_fixtures(config, db, cls): try: with db.begin() as conn: # run magic command to get rid of identity sequences - # 
https://floo.bar/2019/11/29/drop-the-underlying-sequence-of-an-identity-column/ # noqa E501 + # https://floo.bar/2019/11/29/drop-the-underlying-sequence-of-an-identity-column/ # noqa: E501 conn.exec_driver_sql("purge recyclebin") except exc.DatabaseError as err: log.warning("purge recyclebin command failed: %s", err) diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index 4f63002bc9b..bbc64cf710a 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -1421,7 +1421,7 @@ def bind_expression(self, bindvalue): ) -""" # noqa E501 +""" # noqa: E501 from collections import defaultdict import datetime as dt diff --git a/lib/sqlalchemy/engine/cursor.py b/lib/sqlalchemy/engine/cursor.py index abe42c96ad8..e17422e1c31 100644 --- a/lib/sqlalchemy/engine/cursor.py +++ b/lib/sqlalchemy/engine/cursor.py @@ -1683,7 +1683,7 @@ def rowcount(self): :ref:`tutorial_update_delete_rowcount` - in the :ref:`unified_tutorial` - """ # noqa E501 + """ # noqa: E501 try: return self.context.rowcount diff --git a/lib/sqlalchemy/ext/asyncio/scoping.py b/lib/sqlalchemy/ext/asyncio/scoping.py index 46c8f0baa77..8eca8c52480 100644 --- a/lib/sqlalchemy/ext/asyncio/scoping.py +++ b/lib/sqlalchemy/ext/asyncio/scoping.py @@ -85,7 +85,7 @@ def __init__(self, session_factory, scopefunc): the current scope. A function such as ``asyncio.current_task`` may be useful here. 
- """ # noqa E501 + """ # noqa: E501 self.session_factory = session_factory self.registry = ScopedRegistry(session_factory, scopefunc) diff --git a/lib/sqlalchemy/ext/asyncio/session.py b/lib/sqlalchemy/ext/asyncio/session.py index 0b428d7fedd..53ebbc00978 100644 --- a/lib/sqlalchemy/ext/asyncio/session.py +++ b/lib/sqlalchemy/ext/asyncio/session.py @@ -503,7 +503,7 @@ def get_bind(self, mapper=None, clause=None, **kw): blocking-style code, which will be translated to implicitly async calls at the point of invoking IO on the database drivers. - """ # noqa E501 + """ # noqa: E501 return self.sync_session.get_bind( mapper=mapper, clause=clause, bind=bind, **kw diff --git a/lib/sqlalchemy/orm/decl_api.py b/lib/sqlalchemy/orm/decl_api.py index 42419e48cde..4b6c710c72d 100644 --- a/lib/sqlalchemy/orm/decl_api.py +++ b/lib/sqlalchemy/orm/decl_api.py @@ -200,7 +200,7 @@ class AddressMixin: :ref:`orm_declarative_dataclasses_mixin` - illustrates special forms for use with Python dataclasses - """ # noqa E501 + """ # noqa: E501 def __init__(self, fget, cascading=False): super(declared_attr, self).__init__(fget) diff --git a/lib/sqlalchemy/orm/loading.py b/lib/sqlalchemy/orm/loading.py index 7dee717c68d..b5691c0c978 100644 --- a/lib/sqlalchemy/orm/loading.py +++ b/lib/sqlalchemy/orm/loading.py @@ -758,7 +758,7 @@ def _instance_processor( # test.orm.inheritance.test_basic -> # EagerTargetingTest.test_adapt_stringency # OptimizedLoadTest.test_column_expression_joined - # PolymorphicOnNotLocalTest.test_polymorphic_on_column_prop # noqa E501 + # PolymorphicOnNotLocalTest.test_polymorphic_on_column_prop # noqa: E501 # adapted_col = adapter.columns[col] diff --git a/lib/sqlalchemy/sql/dml.py b/lib/sqlalchemy/sql/dml.py index 943bb070546..4f3280373bd 100644 --- a/lib/sqlalchemy/sql/dml.py +++ b/lib/sqlalchemy/sql/dml.py @@ -439,7 +439,7 @@ def returning(self, *cols): :ref:`tutorial_insert_returning` - in the :ref:`unified_tutorial` - """ # noqa E501 + """ # noqa: E501 if 
self._return_defaults: raise exc.InvalidRequestError( "return_defaults() is already configured on this statement" @@ -578,7 +578,7 @@ def returning_column_descriptions(self): :ref:`queryguide_inspection` - ORM background - """ # noqa E501 + """ # noqa: E501 meth = DMLState.get_plugin_class( self ).get_returning_column_descriptions diff --git a/lib/sqlalchemy/sql/functions.py b/lib/sqlalchemy/sql/functions.py index e0ff1655f9f..584782b2818 100644 --- a/lib/sqlalchemy/sql/functions.py +++ b/lib/sqlalchemy/sql/functions.py @@ -167,7 +167,7 @@ def scalar_table_valued(self, name, type_=None): :meth:`_functions.FunctionElement.column_valued` - """ # noqa E501 + """ # noqa: E501 return ScalarFunctionColumn(self, name, type_) @@ -237,7 +237,7 @@ def table_valued(self, *expr, **kw): :meth:`_sql.TableValuedAlias.render_derived` - renders the alias using a derived column clause, e.g. ``AS name(col1, col2, ...)`` - """ # noqa 501 + """ # noqa: 501 new_func = self._generate() @@ -280,7 +280,7 @@ def column_valued(self, name=None): :meth:`_functions.FunctionElement.table_valued` - """ # noqa 501 + """ # noqa: 501 return self.alias(name=name).column @@ -305,7 +305,7 @@ def columns(self): :meth:`_functions.FunctionElement.table_valued` - generates table-valued SQL function expressions. 
- """ # noqa E501 + """ # noqa: E501 return ColumnCollection( columns=[(col.key, col) for col in self._all_selected_columns] @@ -1170,19 +1170,19 @@ class coalesce(ReturnTypeFromArgs): inherit_cache = True -class max(ReturnTypeFromArgs): # noqa A001 +class max(ReturnTypeFromArgs): # noqa: A001 """The SQL MAX() aggregate function.""" inherit_cache = True -class min(ReturnTypeFromArgs): # noqa A001 +class min(ReturnTypeFromArgs): # noqa: A001 """The SQL MIN() aggregate function.""" inherit_cache = True -class sum(ReturnTypeFromArgs): # noqa A001 +class sum(ReturnTypeFromArgs): # noqa: A001 """The SQL SUM() aggregate function.""" inherit_cache = True diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py index 621efd36ce1..d91e4e1661d 100644 --- a/lib/sqlalchemy/sql/schema.py +++ b/lib/sqlalchemy/sql/schema.py @@ -1661,7 +1661,7 @@ def __init__(self, *args, **kwargs): parameter to :class:`_schema.Column`. - """ # noqa E501 + """ # noqa: E501, RST201, RST202 name = kwargs.pop("name", None) type_ = kwargs.pop("type_", None) diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index ea81ce67058..740085043c3 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -889,7 +889,7 @@ def _anonymous_fromclause(self, name=None, flat=False): .. versionadded:: 1.4 -""", # noqa E501 +""", # noqa: E501 ) LABEL_STYLE_TABLENAME_PLUS_COL = util.symbol( @@ -919,7 +919,7 @@ def _anonymous_fromclause(self, name=None, flat=False): .. versionadded:: 1.4 -""", # noqa E501 +""", # noqa: E501 ) @@ -1757,7 +1757,7 @@ class TableValuedAlias(Alias): :ref:`tutorial_functions_table_valued` - in the :ref:`unified_tutorial` - """ # noqa E501 + """ # noqa: E501 __visit_name__ = "table_valued_alias" @@ -1884,7 +1884,7 @@ def render_derived(self, name=None, with_types=False): datatype specification with each column. This is a special syntax currently known to be required by PostgreSQL for some SQL functions. 
- """ # noqa E501 + """ # noqa: E501 # note: don't use the @_generative system here, keep a reference # to the original object. otherwise you can have re-use of the @@ -5897,7 +5897,7 @@ def with_only_columns(self, *columns, **kw): .. versionadded:: 1.4.23 - """ # noqa E501 + """ # noqa: E501 # memoizations should be cleared here as of # I95c560ffcbfa30b26644999412fb6a385125f663 , asserting this @@ -6709,7 +6709,7 @@ def __init__(self, *args, **kwargs): :meth:`_sql.SelectBase.exists` - method to transform a ``SELECT`` to an ``EXISTS`` clause. - """ # noqa E501 + """ # noqa: E501 if args and isinstance(args[0], (SelectBase, ScalarSelect)): s = args[0] else: diff --git a/lib/sqlalchemy/testing/requirements.py b/lib/sqlalchemy/testing/requirements.py index ce1b5d3c1c3..49e3cefb413 100644 --- a/lib/sqlalchemy/testing/requirements.py +++ b/lib/sqlalchemy/testing/requirements.py @@ -1386,7 +1386,7 @@ def async_dialect(self): def greenlet(self): def go(config): try: - import greenlet # noqa F401 + import greenlet # noqa: F401 except ImportError: return False else: diff --git a/lib/sqlalchemy/util/concurrency.py b/lib/sqlalchemy/util/concurrency.py index 9eb44f4657e..e900b437e7f 100644 --- a/lib/sqlalchemy/util/concurrency.py +++ b/lib/sqlalchemy/util/concurrency.py @@ -12,7 +12,7 @@ if compat.py3k: try: - import greenlet # noqa F401 + import greenlet # noqa: F401 except ImportError as e: greenlet_error = str(e) else: @@ -22,18 +22,18 @@ from ._concurrency_py3k import greenlet_spawn from ._concurrency_py3k import is_exit_exception from ._concurrency_py3k import AsyncAdaptedLock - from ._concurrency_py3k import _util_async_run # noqa F401 + from ._concurrency_py3k import _util_async_run # noqa: F401 from ._concurrency_py3k import ( _util_async_run_coroutine_function, - ) # noqa F401, E501 - from ._concurrency_py3k import asyncio # noqa F401 + ) # noqa: F401, E501 + from ._concurrency_py3k import asyncio # noqa: F401 # does not need greennlet, just Python 3 - from 
._compat_py3k import asynccontextmanager # noqa F401 + from ._compat_py3k import asynccontextmanager # noqa: F401 if not have_greenlet: - asyncio = None # noqa F811 + asyncio = None # noqa: F811 def _not_implemented(): # this conditional is to prevent pylance from considering @@ -51,23 +51,23 @@ def _not_implemented(): else "" ) - def is_exit_exception(e): # noqa F811 + def is_exit_exception(e): # noqa: F811 return not isinstance(e, Exception) - def await_only(thing): # noqa F811 + def await_only(thing): # noqa: F811 _not_implemented() - def await_fallback(thing): # noqa F81 + def await_fallback(thing): # noqa: F811 return thing - def greenlet_spawn(fn, *args, **kw): # noqa F81 + def greenlet_spawn(fn, *args, **kw): # noqa: F811 _not_implemented() - def AsyncAdaptedLock(*args, **kw): # noqa F81 + def AsyncAdaptedLock(*args, **kw): # noqa: F811 _not_implemented() - def _util_async_run(fn, *arg, **kw): # noqa F81 + def _util_async_run(fn, *arg, **kw): # noqa: F811 return fn(*arg, **kw) - def _util_async_run_coroutine_function(fn, *arg, **kw): # noqa F81 + def _util_async_run_coroutine_function(fn, *arg, **kw): # noqa: F811 _not_implemented() diff --git a/lib/sqlalchemy/util/langhelpers.py b/lib/sqlalchemy/util/langhelpers.py index 68074dc3354..8d5de183140 100644 --- a/lib/sqlalchemy/util/langhelpers.py +++ b/lib/sqlalchemy/util/langhelpers.py @@ -668,7 +668,7 @@ def instrument(name, clslevel=False): else: code = ( "def %(name)s(%(args)s):\n" - " return %(self_arg)s._proxied.%(name)s(%(apply_kw_proxied)s)" # noqa E501 + " return %(self_arg)s._proxied.%(name)s(%(apply_kw_proxied)s)" # noqa: E501 % metadata ) @@ -1932,9 +1932,9 @@ def repr_tuple_names(names): def has_compiled_ext(): try: - from sqlalchemy import cimmutabledict # noqa F401 - from sqlalchemy import cprocessors # noqa F401 - from sqlalchemy import cresultproxy # noqa F401 + from sqlalchemy import cimmutabledict # noqa: F401 + from sqlalchemy import cprocessors # noqa: F401 + from sqlalchemy import 
cresultproxy # noqa: F401 return True except ImportError: diff --git a/test/aaa_profiling/test_orm.py b/test/aaa_profiling/test_orm.py index 65b1547a183..bbcb6ad5b8b 100644 --- a/test/aaa_profiling/test_orm.py +++ b/test/aaa_profiling/test_orm.py @@ -1227,7 +1227,7 @@ def go(): for i in range(100): # test counts assume objects remain in the session # from previous run - r = q.all() # noqa F841 + r = q.all() # noqa: F841 go() @@ -1243,7 +1243,7 @@ def go(): for i in range(100): # test counts assume objects remain in the session # from previous run - r = q.all() # noqa F841 + r = q.all() # noqa: F841 go() @@ -1257,7 +1257,7 @@ def go(): for i in range(100): # test counts assume objects remain in the session # from previous run - r = q.all() # noqa F841 + r = q.all() # noqa: F841 go() @@ -1271,7 +1271,7 @@ def go(): for i in range(100): # test counts assume objects remain in the session # from previous run - r = q.all() # noqa F841 + r = q.all() # noqa: F841 go() @@ -1286,7 +1286,7 @@ def go(): for i in range(100): # test counts assume objects remain in the session # from previous run - r = q.all() # noqa F841 + r = q.all() # noqa: F841 go() @@ -1301,7 +1301,7 @@ def go(): for i in range(100): # test counts assume objects remain in the session # from previous run - r = q.all() # noqa F841 + r = q.all() # noqa: F841 go() @@ -1315,7 +1315,7 @@ def go(): for i in range(100): # test counts assume objects remain in the session # from previous run - r = q.all() # noqa F841 + r = q.all() # noqa: F841 go() @@ -1330,7 +1330,7 @@ def go(): for i in range(100): # test counts assume objects remain in the session # from previous run - r = q.all() # noqa F841 + r = q.all() # noqa: F841 go() @@ -1344,6 +1344,6 @@ def go(): for i in range(100): # test counts assume objects remain in the session # from previous run - r = q.all() # noqa F841 + r = q.all() # noqa: F841 go() diff --git a/test/engine/test_execute.py b/test/engine/test_execute.py index 2a61bd1f636..afb7ddd4d41 100644 --- 
a/test/engine/test_execute.py +++ b/test/engine/test_execute.py @@ -471,7 +471,7 @@ def test_stmt_exception_bytestring_utf8(self): eq_(str(err), message) # unicode accessor decodes to utf-8 - eq_(unicode(err), util.u("some message méil")) # noqa F821 + eq_(unicode(err), util.u("some message méil")) # noqa: F821 else: eq_(str(err), util.u("some message méil")) @@ -486,7 +486,7 @@ def test_stmt_exception_bytestring_latin1(self): eq_(str(err), message) # unicode accessor decodes to utf-8 - eq_(unicode(err), util.u("some message m\\xe9il")) # noqa F821 + eq_(unicode(err), util.u("some message m\\xe9il")) # noqa: F821 else: eq_(str(err), util.u("some message m\\xe9il")) @@ -497,7 +497,7 @@ def test_stmt_exception_unicode_hook_unicode(self): err = tsa.exc.SQLAlchemyError(message) if util.py2k: - eq_(unicode(err), util.u("some message méil")) # noqa F821 + eq_(unicode(err), util.u("some message méil")) # noqa: F821 else: eq_(str(err), util.u("some message méil")) @@ -506,7 +506,7 @@ def test_stmt_exception_object_arg(self): eq_(str(err), "foo") if util.py2k: - eq_(unicode(err), util.u("fóó")) # noqa F821 + eq_(unicode(err), util.u("fóó")) # noqa: F821 def test_stmt_exception_str_multi_args(self): err = tsa.exc.SQLAlchemyError("some message", 206) diff --git a/test/ext/mypy/files/ensure_descriptor_type_fully_inferred.py b/test/ext/mypy/files/ensure_descriptor_type_fully_inferred.py index 1a89041474b..9ee9c76f467 100644 --- a/test/ext/mypy/files/ensure_descriptor_type_fully_inferred.py +++ b/test/ext/mypy/files/ensure_descriptor_type_fully_inferred.py @@ -16,5 +16,5 @@ class User: u1 = User() -# EXPECTED_MYPY: Incompatible types in assignment (expression has type "Optional[str]", variable has type "str") # noqa E501 +# EXPECTED_MYPY: Incompatible types in assignment (expression has type "Optional[str]", variable has type "str") # noqa: E501 p: str = u1.name diff --git a/test/ext/mypy/files/ensure_descriptor_type_noninferred.py 
b/test/ext/mypy/files/ensure_descriptor_type_noninferred.py index b1dabe8dc9b..e8ce35114e7 100644 --- a/test/ext/mypy/files/ensure_descriptor_type_noninferred.py +++ b/test/ext/mypy/files/ensure_descriptor_type_noninferred.py @@ -19,5 +19,5 @@ class User: u1 = User() -# EXPECTED_MYPY: Incompatible types in assignment (expression has type "Optional[str]", variable has type "Optional[int]") # noqa E501 +# EXPECTED_MYPY: Incompatible types in assignment (expression has type "Optional[str]", variable has type "Optional[int]") # noqa: E501 p: Optional[int] = u1.name diff --git a/test/ext/mypy/files/ensure_descriptor_type_semiinferred.py b/test/ext/mypy/files/ensure_descriptor_type_semiinferred.py index 2154ff074c6..d72649b62a4 100644 --- a/test/ext/mypy/files/ensure_descriptor_type_semiinferred.py +++ b/test/ext/mypy/files/ensure_descriptor_type_semiinferred.py @@ -22,5 +22,5 @@ class User: u1 = User() -# EXPECTED_MYPY: Incompatible types in assignment (expression has type "Optional[str]", variable has type "str") # noqa E501 +# EXPECTED_MYPY: Incompatible types in assignment (expression has type "Optional[str]", variable has type "str") # noqa: E501 p: str = u1.name diff --git a/test/ext/mypy/files/invalid_noninferred_lh_type.py b/test/ext/mypy/files/invalid_noninferred_lh_type.py index 5084de7225e..e9ff303ca78 100644 --- a/test/ext/mypy/files/invalid_noninferred_lh_type.py +++ b/test/ext/mypy/files/invalid_noninferred_lh_type.py @@ -11,5 +11,5 @@ class User: __tablename__ = "user" id = Column(Integer(), primary_key=True) - # EXPECTED: Left hand assignment 'name: "int"' not compatible with ORM mapped expression # noqa E501 + # EXPECTED: Left hand assignment 'name: "int"' not compatible with ORM mapped expression # noqa: E501 name: int = Column(String()) diff --git a/test/ext/mypy/files/typeless_fk_col_cant_infer.py b/test/ext/mypy/files/typeless_fk_col_cant_infer.py index beb4a7a5d0c..0b933db4785 100644 --- a/test/ext/mypy/files/typeless_fk_col_cant_infer.py +++ 
b/test/ext/mypy/files/typeless_fk_col_cant_infer.py @@ -20,6 +20,6 @@ class Address: __tablename__ = "address" id = Column(Integer, primary_key=True) - # EXPECTED: Can't infer type from ORM mapped expression assigned to attribute 'user_id'; # noqa E501 + # EXPECTED: Can't infer type from ORM mapped expression assigned to attribute 'user_id'; # noqa: E501 user_id = Column(ForeignKey("user.id")) email_address = Column(String) diff --git a/test/ext/mypy/test_mypy_plugin_py3k.py b/test/ext/mypy/test_mypy_plugin_py3k.py index 681c9d57bab..5a6a2972732 100644 --- a/test/ext/mypy/test_mypy_plugin_py3k.py +++ b/test/ext/mypy/test_mypy_plugin_py3k.py @@ -174,7 +174,7 @@ def test_mypy(self, mypy_runner, path): if m: is_mypy = bool(m.group(1)) expected_msg = m.group(2) - expected_msg = re.sub(r"# noqa ?.*", "", m.group(2)) + expected_msg = re.sub(r"# noqa[:]? ?.*", "", m.group(2)) expected_errors.append( (num, is_mypy, expected_msg.strip()) ) diff --git a/test/orm/test_deprecations.py b/test/orm/test_deprecations.py index 253dfa17b14..33a91184c26 100644 --- a/test/orm/test_deprecations.py +++ b/test/orm/test_deprecations.py @@ -3151,7 +3151,7 @@ def test_multiple_adaption(self): mach_alias = machines.select() # note python 2 does not allow parens here; reformat in py3 only - with DeprecatedQueryTest._expect_implicit_subquery(), _aliased_join_warning( # noqa E501 + with DeprecatedQueryTest._expect_implicit_subquery(), _aliased_join_warning( # noqa: E501 "Person->people" ): self.assert_compile( @@ -3884,7 +3884,7 @@ def test_illegal_non_primary(self): with testing.expect_deprecated( "The mapper.non_primary parameter is deprecated" ): - m = self.mapper_registry.map_imperatively( # noqa F841 + m = self.mapper_registry.map_imperatively( # noqa: F841 User, users, non_primary=True, @@ -3947,7 +3947,7 @@ def test_illegal_non_primary_legacy(self): with testing.expect_deprecated( "The mapper.non_primary parameter is deprecated" ): - m = mapper( # noqa F841 + m = mapper( # noqa: F841 
User, users, non_primary=True, diff --git a/test/sql/test_functions.py b/test/sql/test_functions.py index c4326b8abc8..3a9a06728cb 100644 --- a/test/sql/test_functions.py +++ b/test/sql/test_functions.py @@ -1399,7 +1399,7 @@ def test_named_with_ordinality(self): `WITH ORDINALITY AS unnested(unnested, ordinality) ON true LEFT OUTER JOIN b ON unnested.unnested = b.ref - """ # noqa 501 + """ # noqa: 501 a = table("a", column("id"), column("refs")) b = table("b", column("id"), column("ref")) From 2d7d343823f06dd31503368b4f7ad060e802d0bb Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 11 Apr 2022 22:03:28 -0400 Subject: [PATCH 203/632] repair ancient and incorrect comment it referred towards _columntoproperty referring to lists of MapperProperty. this comment goes all the way to pre 0.1 being released. it's likely been wrong for nearly all that time. Change-Id: I71234ae58a6253249d92224356e38372e4aff148 (cherry picked from commit 8254e3a28a32b7097fb926a373c17c35d4ec1d57) --- lib/sqlalchemy/orm/mapper.py | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index b4130b91941..ad68820125f 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -1415,19 +1415,14 @@ def _configure_pks(self): ) def _configure_properties(self): - # Column and other ClauseElement objects which are mapped - # TODO: technically this should be a DedupeColumnCollection - # however DCC needs changes and more tests to fully cover - # storing columns under a separate key name + # TODO: consider using DedupeColumnCollection self.columns = self.c = sql_base.ColumnCollection() # object attribute names mapped to MapperProperty objects self._props = util.OrderedDict() - # table columns mapped to lists of MapperProperty objects - # using a list allows a single column to be defined as - # populating multiple object attributes + # table columns mapped to MapperProperty self._columntoproperty = 
_ColumnMapping(self) # load custom properties @@ -3618,6 +3613,7 @@ class _ColumnMapping(dict): __slots__ = ("mapper",) def __init__(self, mapper): + # TODO: weakref would be a good idea here self.mapper = mapper def __missing__(self, column): From 289894f9af4bebee499969ee8701e06eb8527913 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 4 Apr 2022 19:01:54 -0400 Subject: [PATCH 204/632] read from cls.__dict__ so init_subclass works Modified the :class:`.DeclarativeMeta` metaclass to pass ``cls.__dict__`` into the declarative scanning process to look for attributes, rather than the separate dictionary passed to the type's ``__init__()`` method. This allows user-defined base classes that add attributes within an ``__init_subclass__()`` to work as expected, as ``__init_subclass__()`` can only affect the ``cls.__dict__`` itself and not the other dictionary. This is technically a regression from 1.3 where ``__dict__`` was being used. Additionally makes the reference between ClassManager and the declarative configuration object a weak reference, so that it can be discarded after mappers are set up. Fixes: #7900 Change-Id: I3c2fd4e227cc1891aa4bb3d7d5b43d5686f9f27c (cherry picked from commit 428ea01f00a9cc7f85e435018565eb6da7af1b77) --- doc/build/changelog/unreleased_14/7900.rst | 14 ++++++++++++++ lib/sqlalchemy/orm/decl_api.py | 7 ++++++- lib/sqlalchemy/orm/decl_base.py | 13 +++++++++++-- lib/sqlalchemy/orm/instrumentation.py | 4 +++- test/orm/declarative/test_mixin.py | 20 +++++++++++++++++++- 5 files changed, 53 insertions(+), 5 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7900.rst diff --git a/doc/build/changelog/unreleased_14/7900.rst b/doc/build/changelog/unreleased_14/7900.rst new file mode 100644 index 00000000000..9d6d507703c --- /dev/null +++ b/doc/build/changelog/unreleased_14/7900.rst @@ -0,0 +1,14 @@ +.. 
change:: + :tags: bug, orm, declarative + :tickets: 7900 + + Modified the :class:`.DeclarativeMeta` metaclass to pass ``cls.__dict__`` + into the declarative scanning process to look for attributes, rather than + the separate dictionary passed to the type's ``__init__()`` method. This + allows user-defined base classes that add attributes within an + ``__init_subclass__()`` to work as expected, as ``__init_subclass__()`` can + only affect the ``cls.__dict__`` itself and not the other dictionary. This + is technically a regression from 1.3 where ``__dict__`` was being used. + + + diff --git a/lib/sqlalchemy/orm/decl_api.py b/lib/sqlalchemy/orm/decl_api.py index 4b6c710c72d..16f91c69ddb 100644 --- a/lib/sqlalchemy/orm/decl_api.py +++ b/lib/sqlalchemy/orm/decl_api.py @@ -54,6 +54,10 @@ def has_inherited_table(cls): class DeclarativeMeta(type): def __init__(cls, classname, bases, dict_, **kw): + # use cls.__dict__, which can be modified by an + # __init_subclass__() method (#7900) + dict_ = cls.__dict__ + # early-consume registry from the initial declarative base, # assign privately to not conflict with subclass attributes named # "registry" @@ -228,7 +232,8 @@ def __get__(desc, self, cls): # here, we are inside of the declarative scan. use the registry # that is tracking the values of these attributes. 
- declarative_scan = manager.declarative_scan + declarative_scan = manager.declarative_scan() + assert declarative_scan is not None reg = declarative_scan.declared_attr_reg if desc in reg: diff --git a/lib/sqlalchemy/orm/decl_base.py b/lib/sqlalchemy/orm/decl_base.py index 6f02e569774..ed4ccd19682 100644 --- a/lib/sqlalchemy/orm/decl_base.py +++ b/lib/sqlalchemy/orm/decl_base.py @@ -152,7 +152,13 @@ def _check_declared_props_nocascade(obj, name, cls): class _MapperConfig(object): - __slots__ = ("cls", "classname", "properties", "declared_attr_reg") + __slots__ = ( + "cls", + "classname", + "properties", + "declared_attr_reg", + "__weakref__", + ) @classmethod def setup_mapping(cls, registry, cls_, dict_, table, mapper_kw): @@ -300,9 +306,12 @@ def __init__( mapper_kw, ): + # grab class dict before the instrumentation manager has been added. + # reduces cycles + self.dict_ = dict(dict_) if dict_ else {} + super(_ClassScanMapperConfig, self).__init__(registry, cls_, mapper_kw) - self.dict_ = dict(dict_) if dict_ else {} self.persist_selectable = None self.declared_columns = set() self.column_copies = {} diff --git a/lib/sqlalchemy/orm/instrumentation.py b/lib/sqlalchemy/orm/instrumentation.py index 97692b6421c..a7023a21d98 100644 --- a/lib/sqlalchemy/orm/instrumentation.py +++ b/lib/sqlalchemy/orm/instrumentation.py @@ -30,6 +30,8 @@ """ +import weakref + from . import base from . import collections from . 
import exc @@ -131,7 +133,7 @@ def _update_state( if registry: registry._add_manager(self) if declarative_scan: - self.declarative_scan = declarative_scan + self.declarative_scan = weakref.ref(declarative_scan) if expired_attribute_loader: self.expired_attribute_loader = expired_attribute_loader diff --git a/test/orm/declarative/test_mixin.py b/test/orm/declarative/test_mixin.py index 5a4673a23ed..78ab4dbfc3e 100644 --- a/test/orm/declarative/test_mixin.py +++ b/test/orm/declarative/test_mixin.py @@ -38,7 +38,11 @@ mapper_registry = None -class DeclarativeTestBase(fixtures.TestBase, testing.AssertsExecutionResults): +class DeclarativeTestBase( + testing.AssertsCompiledSQL, + fixtures.TestBase, + testing.AssertsExecutionResults, +): def setup_test(self): global Base, mapper_registry @@ -53,6 +57,20 @@ def teardown_test(self): class DeclarativeMixinTest(DeclarativeTestBase): + @testing.requires.python3 + def test_init_subclass_works(self, registry): + class Base: + def __init_subclass__(cls): + cls.id = Column(Integer, primary_key=True) + + Base = registry.generate_base(cls=Base) + + class Foo(Base): + __tablename__ = "foo" + name = Column(String) + + self.assert_compile(select(Foo), "SELECT foo.name, foo.id FROM foo") + def test_simple_wbase(self): class MyMixin(object): From 86f6409d883e9bed5174c521ecaac49ab0c0474d Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Mon, 11 Apr 2022 23:19:16 +0200 Subject: [PATCH 205/632] Fix psycopg2 pre_ping with autocommit Fixed an issue that would cause autocommit mode to be reset when using pre_ping in conjunction with engine level autocommit on the psycopg2 driver.
Fixes: #7930 Change-Id: I4cccaf1b7f8cbacd853689458080784114fcc390 (cherry picked from commit 363b68e08e9ceed4ce6821f5fd48ab32bdfd807c) --- doc/build/changelog/unreleased_14/7930.rst | 7 +++++++ lib/sqlalchemy/dialects/postgresql/psycopg2.py | 8 +++++--- test/dialect/postgresql/test_dialect.py | 17 +++++++++++++++++ 3 files changed, 29 insertions(+), 3 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7930.rst diff --git a/doc/build/changelog/unreleased_14/7930.rst b/doc/build/changelog/unreleased_14/7930.rst new file mode 100644 index 00000000000..bf4f9988ca2 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7930.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: bug, postgresql + :tickets: 7930 + + Fixed an issue that would cause autocommit mode to be reset + when using pre_ping in conjunction with engine level autocommit + on the psycopg2 driver. diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py index adebc9b6769..f7121a82a1a 100644 --- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py +++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py @@ -835,15 +835,17 @@ def get_deferrable(self, connection): def do_ping(self, dbapi_connection): cursor = None + before_autocommit = dbapi_connection.autocommit try: - dbapi_connection.autocommit = True + if not before_autocommit: + dbapi_connection.autocommit = True cursor = dbapi_connection.cursor() try: cursor.execute(self._dialect_specific_select_one) finally: cursor.close() - if not dbapi_connection.closed: - dbapi_connection.autocommit = False + if not before_autocommit and not dbapi_connection.closed: + dbapi_connection.autocommit = before_autocommit except self.dbapi.Error as err: if self.is_disconnect(err, dbapi_connection, cursor): return False diff --git a/test/dialect/postgresql/test_dialect.py b/test/dialect/postgresql/test_dialect.py index 1d797a697d7..8aa90364956 100644 --- a/test/dialect/postgresql/test_dialect.py +++ 
b/test/dialect/postgresql/test_dialect.py @@ -1095,6 +1095,23 @@ def test_readonly_flag_engine(self, testing_engine, pre_ping): dbapi_conn.rollback() eq_(val, "off") + @testing.combinations((True,), (False,), argnames="autocommit") + def test_autocommit_pre_ping(self, testing_engine, autocommit): + engine = testing_engine( + options={ + "isolation_level": "AUTOCOMMIT" + if autocommit + else "SERIALIZABLE", + "pool_pre_ping": True, + } + ) + for i in range(4): + with engine.connect() as conn: + conn.execute(text("select 1")).scalar() + + dbapi_conn = conn.connection.dbapi_connection + eq_(dbapi_conn.autocommit, autocommit) + def test_deferrable_flag_engine(self): engine = engines.testing_engine( options={ From 0c0bcd95ce843d8541ae952ec5262851d109ab0a Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 14 Apr 2022 09:59:11 -0400 Subject: [PATCH 206/632] update Numeric/Float docstrings These docs were very out of date re: cdecimal. Additionally, as pointed out in #5252, the Numeric documentation is misleading; SQLAlchemy's Numeric hierarchy resembles more of the Oracle approach where precision and scale solely determine the kind of datatype being worked with. Float is essentially Numeric with different defaults. Fixes: #5252 Change-Id: I661109fabf04ba7831c7ddafba15321dd445ea5d (cherry picked from commit 3d300066e8b20a89e0b82bf09dd0c4016f5f8e51) --- lib/sqlalchemy/sql/sqltypes.py | 63 ++++++++++++---------------------- 1 file changed, 22 insertions(+), 41 deletions(-) diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index 8c12d62ddf1..e51397da7f0 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ -587,41 +587,34 @@ class BigInteger(Integer): class Numeric(_LookupExpressionAdapter, TypeEngine): - """A type for fixed precision numbers, such as ``NUMERIC`` or ``DECIMAL``. + """Base for non-integer numeric types, such as + ``NUMERIC``, ``FLOAT``, ``DECIMAL``, and other variants. 
- This type returns Python ``decimal.Decimal`` objects by default, unless - the :paramref:`.Numeric.asdecimal` flag is set to False, in which case - they are coerced to Python ``float`` objects. + The :class:`.Numeric` datatype when used directly will render DDL + corresponding to precision numerics if available, such as + ``NUMERIC(precision, scale)``. The :class:`.Float` subclass will + attempt to render a floating-point datatype such as ``FLOAT(precision)``. - .. note:: + :class:`.Numeric` returns Python ``decimal.Decimal`` objects by default, + based on the default value of ``True`` for the + :paramref:`.Numeric.asdecimal` parameter. If this parameter is set to + False, returned values are coerced to Python ``float`` objects. - The :class:`.Numeric` type is designed to receive data from a database - type that is explicitly known to be a decimal type - (e.g. ``DECIMAL``, ``NUMERIC``, others) and not a floating point - type (e.g. ``FLOAT``, ``REAL``, others). - If the database column on the server is in fact a floating-point - type, such as ``FLOAT`` or ``REAL``, use the :class:`.Float` - type or a subclass, otherwise numeric coercion between - ``float``/``Decimal`` may or may not function as expected. + The :class:`.Float` subtype, being more specific to floating point, + defaults the :paramref:`.Float.asdecimal` flag to False so that the + default Python datatype is ``float``. .. note:: - The Python ``decimal.Decimal`` class is generally slow - performing; cPython 3.3 has now switched to use the `cdecimal - `_ library natively. 
For - older Python versions, the ``cdecimal`` library can be patched - into any application where it will replace the ``decimal`` - library fully, however this needs to be applied globally and - before any other modules have been imported, as follows:: - - import sys - import cdecimal - sys.modules["decimal"] = cdecimal - - Note that the ``cdecimal`` and ``decimal`` libraries are **not - compatible with each other**, so patching ``cdecimal`` at the - global level is the only way it can be used effectively with - various DBAPIs that hardcode to import the ``decimal`` library. + When using a :class:`.Numeric` datatype against a database type that + returns Python floating point values to the driver, the accuracy of the + decimal conversion indicated by :paramref:`.Numeric.asdecimal` may be + limited. The behavior of specific numeric/floating point datatypes + is a product of the SQL datatype in use, the Python :term:`DBAPI` + in use, as well as strategies that may be present within + the SQLAlchemy dialect in use. Users requiring specific precision/ + scale are encouraged to experiment with the available datatypes + in order to determine the best results. """ @@ -661,8 +654,6 @@ def __init__( value of ".scale" as the default for decimal_return_scale, if not otherwise specified. - .. versionadded:: 0.9.0 - When using the ``Numeric`` type, care should be taken to ensure that the asdecimal setting is appropriate for the DBAPI in use - when Numeric applies a conversion from Decimal->float or float-> @@ -771,16 +762,6 @@ class Float(Numeric): :paramref:`.Float.asdecimal` flag is set to True, in which case they are coerced to ``decimal.Decimal`` objects. - .. note:: - - The :class:`.Float` type is designed to receive data from a database - type that is explicitly known to be a floating point type - (e.g. ``FLOAT``, ``REAL``, others) - and not a decimal type (e.g. ``DECIMAL``, ``NUMERIC``, others). 
- If the database column on the server is in fact a Numeric - type, such as ``DECIMAL`` or ``NUMERIC``, use the :class:`.Numeric` - type or a subclass, otherwise numeric coercion between - ``float``/``Decimal`` may or may not function as expected. """ From 93401c5f262b22f2ce729a04202ca559f4593976 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 14 Apr 2022 12:01:16 -0400 Subject: [PATCH 207/632] Ensure ORMInsert sets up bind state Fixed regression where the change in #7861, released in version 1.4.33, that brought the :class:`.Insert` construct to be partially recognized as an ORM-enabled statement did not properly transfer the correct mapper / mapped table state to the :class:`.Session`, causing the :meth:`.Session.get_bind` method to fail for a :class:`.Session` that was bound to engines and/or connections using the :paramref:`.Session.binds` parameter. Fixes: #7936 Change-Id: If19edef8e2dd68335465429eb3d2f0bfdade4a4c (cherry picked from commit 4f96c12db923624204110e56ce730f5aafbb9463) --- doc/build/changelog/unreleased_14/7936.rst | 11 ++++++++ lib/sqlalchemy/orm/persistence.py | 8 ++++++ test/orm/test_bind.py | 15 ++++++++++ test/orm/test_update_delete.py | 33 ++++++++++++++++++++++ 4 files changed, 67 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7936.rst diff --git a/doc/build/changelog/unreleased_14/7936.rst b/doc/build/changelog/unreleased_14/7936.rst new file mode 100644 index 00000000000..bcad142b0bc --- /dev/null +++ b/doc/build/changelog/unreleased_14/7936.rst @@ -0,0 +1,11 @@ +.. 
change:: + :tags: bug, orm, regression + :tickets: 7936 + + Fixed regression where the change in #7861, released in version 1.4.33, + that brought the :class:`.Insert` construct to be partially recognized as + an ORM-enabled statement did not properly transfer the correct mapper / + mapped table state to the :class:`.Session`, causing the + :meth:`.Session.get_bind` method to fail for a :class:`.Session` that was + bound to engines and/or connections using the :paramref:`.Session.binds` + parameter. diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index 654e659f411..a17b24ab5ee 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ b/lib/sqlalchemy/orm/persistence.py @@ -2196,6 +2196,14 @@ def orm_pre_session_exec( bind_arguments, is_reentrant_invoke, ): + bind_arguments["clause"] = statement + try: + plugin_subject = statement._propagate_attrs["plugin_subject"] + except KeyError: + assert False, "statement had 'orm' plugin but no plugin_subject" + else: + bind_arguments["mapper"] = plugin_subject.mapper + return ( statement, util.immutabledict(execution_options), diff --git a/test/orm/test_bind.py b/test/orm/test_bind.py index e1cd8fdd8a2..6326e0c4dca 100644 --- a/test/orm/test_bind.py +++ b/test/orm/test_bind.py @@ -323,6 +323,21 @@ def test_get_bind(self, testcase, expected): lambda User: {"clause": mock.ANY, "mapper": inspect(User)}, "e1", ), + ( + lambda User: update(User) + .values(name="not ed") + .where(User.name == "ed"), + lambda User: {"clause": mock.ANY, "mapper": inspect(User)}, + "e1", + ), + ( + lambda User: insert(User).values(name="not ed"), + lambda User: { + "clause": mock.ANY, + "mapper": inspect(User), + }, + "e1", + ), ) def test_bind_through_execute( self, statement, expected_get_bind_args, expected_engine_name diff --git a/test/orm/test_update_delete.py b/test/orm/test_update_delete.py index 21863c57a52..255d70f4142 100644 --- a/test/orm/test_update_delete.py +++ b/test/orm/test_update_delete.py @@ -96,6 +96,39 
@@ def setup_mappers(cls): ) cls.mapper_registry.map_imperatively(Address, addresses) + @testing.combinations("table", "mapper", "both", argnames="bind_type") + @testing.combinations( + "update", "insert", "delete", argnames="statement_type" + ) + def test_get_bind_scenarios(self, connection, bind_type, statement_type): + """test for #7936""" + + User = self.classes.User + + if statement_type == "insert": + stmt = insert(User).values( + {User.id: 5, User.age: 25, User.name: "spongebob"} + ) + elif statement_type == "update": + stmt = ( + update(User) + .where(User.id == 2) + .values({User.name: "spongebob"}) + ) + elif statement_type == "delete": + stmt = delete(User) + + binds = {} + if bind_type == "both": + binds = {User: connection, User.__table__: connection} + elif bind_type == "mapper": + binds = {User: connection} + elif bind_type == "table": + binds = {User.__table__: connection} + + with Session(binds=binds) as sess: + sess.execute(stmt) + def test_illegal_eval(self): User = self.classes.User s = fixture_session() From 4e213967f751d145fbf52398e8ad37e6bba16f94 Mon Sep 17 00:00:00 2001 From: Sergey Golitsynskiy Date: Thu, 14 Apr 2022 16:01:26 -0400 Subject: [PATCH 208/632] Fix link to RFC 1738 (#7935) --- doc/build/core/engines.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/core/engines.rst b/doc/build/core/engines.rst index cb114ef7f9e..0a6e7b3dc13 100644 --- a/doc/build/core/engines.rst +++ b/doc/build/core/engines.rst @@ -57,7 +57,7 @@ Database Urls The :func:`_sa.create_engine` function produces an :class:`_engine.Engine` object based on a URL. These URLs follow `RFC-1738 -`_, and usually can include username, password, +`_, and usually can include username, password, hostname, database name as well as optional keyword arguments for additional configuration. In some cases a file path is accepted, and in others a "data source name" replaces the "host" and "database" portions. 
The typical form of a database URL is:: From 2ea124b61f0ef98d398c43299509224e9a9d77f6 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 15 Apr 2022 11:11:42 -0400 Subject: [PATCH 209/632] update bake_queries documentation this parameter has had no effect since 1.4.23 in #6889, but the documentation was not updated. Change-Id: I36f4ea6144b9fd09243f96698e7a03cd217da1e7 --- lib/sqlalchemy/orm/relationships.py | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/lib/sqlalchemy/orm/relationships.py b/lib/sqlalchemy/orm/relationships.py index 7949e9e4d35..efa6c63f017 100644 --- a/lib/sqlalchemy/orm/relationships.py +++ b/lib/sqlalchemy/orm/relationships.py @@ -367,20 +367,11 @@ class name or dotted package-qualified name. :ref:`error_qzyx` - usage example :param bake_queries=True: - Enable :ref:`lambda caching ` for loader - strategies, if applicable, which adds a performance gain to the - construction of SQL constructs used by loader strategies, in addition - to the usual SQL statement caching used throughout SQLAlchemy. This - parameter currently applies only to the "lazy" and "selectin" loader - strategies. There is generally no reason to set this parameter to - False. + Legacy parameter, not used. - .. versionchanged:: 1.4 Relationship loaders no longer use the - previous "baked query" system of query caching. The "lazy" - and "selectin" loaders make use of the "lambda cache" system - for the construction of SQL constructs, - as well as the usual SQL caching system that is throughout - SQLAlchemy as of the 1.4 series. + .. versionchanged:: 1.4.23 the "lambda caching" system is no longer + used by loader strategies and the ``bake_queries`` parameter + has no effect. 
:param cascade: A comma-separated list of cascade rules which determines how From 549e3a5be6a8c6db7183b1f7d20e8f3f39687ef4 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 16 Apr 2022 11:40:13 -0400 Subject: [PATCH 210/632] adapt_from_selectables is a set aliasedclass setting this as a list, making all the containment checks more expensive. the presence of the collection also serves as a flag so it has to stay optional. Change-Id: Iafffbeb29d77441ca35ecd8048244ee6eed1232c (cherry picked from commit c538f810bce57472c8960a0a6c4c61024b00f3ed) --- lib/sqlalchemy/orm/util.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index 9ec2ad0768f..50ac8917d3d 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -708,11 +708,11 @@ def __init__( # make sure the adapter doesn't try to grab other tables that # are not even the thing we are mapping, such as embedded # selectables in subqueries or CTEs. See issue #6060 - adapt_from_selectables=[ + adapt_from_selectables={ m.selectable for m in self.with_polymorphic_mappers if not adapt_on_names - ], + }, ) if nest_adapters: From caea59258a0003f3e1a0c93f21312ba9ff60abc9 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Fri, 15 Apr 2022 00:29:01 +0200 Subject: [PATCH 211/632] Allow contextvars to be set in events when using asyncio Allow setting contextvar values inside async adapted event handlers. Previously the value set to the contextvar would not be properly propagated. 
Fixes: #7937 Change-Id: I787aa869f8d057579e13e32c749f05f184ffd02a (cherry picked from commit 640d163bd8bf61e87790255558b6f704a0d06174) --- doc/build/changelog/unreleased_14/7937.rst | 8 +++++ lib/sqlalchemy/util/_concurrency_py3k.py | 36 +++++++++------------- test/base/test_concurrency_py3k.py | 36 +++++++++++++++++++--- 3 files changed, 53 insertions(+), 27 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7937.rst diff --git a/doc/build/changelog/unreleased_14/7937.rst b/doc/build/changelog/unreleased_14/7937.rst new file mode 100644 index 00000000000..96d80d6cd27 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7937.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, asyncio + :tickets: 7937 + + Allow setting contextvar values inside async adapted event handlers. + Previously the value set to the contextvar would not be properly + propagated. + diff --git a/lib/sqlalchemy/util/_concurrency_py3k.py b/lib/sqlalchemy/util/_concurrency_py3k.py index e3c5dac5800..0b128344d64 100644 --- a/lib/sqlalchemy/util/_concurrency_py3k.py +++ b/lib/sqlalchemy/util/_concurrency_py3k.py @@ -17,18 +17,10 @@ from .langhelpers import memoized_property from .. import exc -if compat.py37: - try: - from contextvars import copy_context as _copy_context - - # If greenlet.gr_context is present in current version of greenlet, - # it will be set with a copy of the current context on creation. - # Refs: https://github.com/python-greenlet/greenlet/pull/198 - getattr(greenlet.greenlet, "gr_context") - except (ImportError, AttributeError): - _copy_context = None -else: - _copy_context = None +# If greenlet.gr_context is present in current version of greenlet, +# it will be set with the current context on creation. 
+# Refs: https://github.com/python-greenlet/greenlet/pull/198 +_has_gr_context = hasattr(greenlet.getcurrent(), "gr_context") def is_exit_exception(e): @@ -48,15 +40,15 @@ class _AsyncIoGreenlet(greenlet.greenlet): def __init__(self, fn, driver): greenlet.greenlet.__init__(self, fn, driver) self.driver = driver - if _copy_context is not None: - self.gr_context = _copy_context() + if _has_gr_context: + self.gr_context = driver.gr_context def await_only(awaitable: Coroutine) -> Any: """Awaits an async function in a sync method. The sync method must be inside a :func:`greenlet_spawn` context. - :func:`await_` calls cannot be nested. + :func:`await_only` calls cannot be nested. :param awaitable: The coroutine to call. @@ -65,8 +57,8 @@ def await_only(awaitable: Coroutine) -> Any: current = greenlet.getcurrent() if not isinstance(current, _AsyncIoGreenlet): raise exc.MissingGreenlet( - "greenlet_spawn has not been called; can't call await_() here. " - "Was IO attempted in an unexpected place?" + "greenlet_spawn has not been called; can't call await_only() " + "here. Was IO attempted in an unexpected place?" ) # returns the control to the driver greenlet passing it @@ -80,7 +72,7 @@ def await_fallback(awaitable: Coroutine) -> Any: """Awaits an async function in a sync method. The sync method must be inside a :func:`greenlet_spawn` context. - :func:`await_` calls cannot be nested. + :func:`await_fallback` calls cannot be nested. :param awaitable: The coroutine to call. @@ -92,7 +84,7 @@ def await_fallback(awaitable: Coroutine) -> Any: if loop.is_running(): raise exc.MissingGreenlet( "greenlet_spawn has not been called and asyncio event " - "loop is already running; can't call await_() here. " + "loop is already running; can't call await_fallback() here. " "Was IO attempted in an unexpected place?" ) return loop.run_until_complete(awaitable) @@ -105,7 +97,7 @@ async def greenlet_spawn( ) -> Any: """Runs a sync function ``fn`` in a new greenlet. 
- The sync function can then use :func:`await_` to wait for async + The sync function can then use :func:`await_only` to wait for async functions. :param fn: The sync callable to call. @@ -115,7 +107,7 @@ async def greenlet_spawn( context = _AsyncIoGreenlet(fn, greenlet.getcurrent()) # runs the function synchronously in gl greenlet. If the execution - # is interrupted by await_, context is not dead and result is a + # is interrupted by await_only, context is not dead and result is a # coroutine to wait. If the context is dead the function has # returned, and its result can be returned. switch_occurred = False @@ -124,7 +116,7 @@ async def greenlet_spawn( while not context.dead: switch_occurred = True try: - # wait for a coroutine from await_ and then return its + # wait for a coroutine from await_only and then return its # result back to it. value = await result except BaseException: diff --git a/test/base/test_concurrency_py3k.py b/test/base/test_concurrency_py3k.py index 0b648aa30bd..de7157c7889 100644 --- a/test/base/test_concurrency_py3k.py +++ b/test/base/test_concurrency_py3k.py @@ -1,3 +1,5 @@ +import asyncio +import random import threading from sqlalchemy import exc @@ -8,7 +10,6 @@ from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_true -from sqlalchemy.util import asyncio from sqlalchemy.util import await_fallback from sqlalchemy.util import await_only from sqlalchemy.util import greenlet_spawn @@ -89,7 +90,8 @@ async def test_await_only_no_greenlet(self): to_await = run1() with expect_raises_message( exc.MissingGreenlet, - r"greenlet_spawn has not been called; can't call await_\(\) here.", + "greenlet_spawn has not been called; " + r"can't call await_only\(\) here.", ): await_only(to_await) @@ -134,7 +136,8 @@ def go(): with expect_raises_message( exc.InvalidRequestError, - r"greenlet_spawn has not been called; can't call await_\(\) here.", + "greenlet_spawn has not been called; 
" + r"can't call await_only\(\) here.", ): await greenlet_spawn(go) @@ -147,20 +150,43 @@ async def test_contextvars(self): import contextvars var = contextvars.ContextVar("var") - concurrency = 5 + concurrency = 500 + # NOTE: sleep here is not necessary. It's used to simulate IO + # ensuring that task are not run sequentially async def async_inner(val): + await asyncio.sleep(random.uniform(0.005, 0.015)) eq_(val, var.get()) return var.get() + async def async_set(val): + await asyncio.sleep(random.uniform(0.005, 0.015)) + var.set(val) + def inner(val): retval = await_only(async_inner(val)) eq_(val, var.get()) eq_(retval, val) + + # set the value in a sync function + newval = val + concurrency + var.set(newval) + syncset = await_only(async_inner(newval)) + eq_(newval, var.get()) + eq_(syncset, newval) + + # set the value in an async function + retval = val + 2 * concurrency + await_only(async_set(retval)) + eq_(var.get(), retval) + eq_(await_only(async_inner(retval)), retval) + return retval async def task(val): + await asyncio.sleep(random.uniform(0.005, 0.015)) var.set(val) + await asyncio.sleep(random.uniform(0.005, 0.015)) return await greenlet_spawn(inner, val) values = { @@ -169,7 +195,7 @@ async def task(val): [task(i) for i in range(concurrency)] ) } - eq_(values, set(range(concurrency))) + eq_(values, set(range(concurrency * 2, concurrency * 3))) @async_test async def test_require_await(self): From bb42213d839c4d98b27876ce06ed6bbb70586568 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 18 Apr 2022 23:12:31 -0400 Subject: [PATCH 212/632] update ORM join doc forgot to remove string support for the ON clause here. will backport a deprecation message to 1.4. 
Change-Id: If90e2bff929cce4dc8a6e9bd3ad818b8f8e514a6 (cherry picked from commit 13a8552053c21a9fa7ff6f992ed49ee92cca73e4) --- lib/sqlalchemy/orm/util.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index 50ac8917d3d..7f72c1fc086 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -1809,30 +1809,35 @@ def join( left and right selectables may be not only core selectable objects such as :class:`_schema.Table`, but also mapped classes or :class:`.AliasedClass` instances. The "on" clause can - be a SQL expression, or an attribute or string name + be a SQL expression or an ORM mapped attribute referencing a configured :func:`_orm.relationship`. + .. deprecated:: 1.4 using a string relationship name for the "onclause" + is deprecated and will be removed in 2.0; the onclause may be only + an ORM-mapped relationship attribute or a SQL expression construct. + :func:`_orm.join` is not commonly needed in modern usage, as its functionality is encapsulated within that of the - :meth:`_query.Query.join` method, which features a + :meth:`_sql.Select.join` and :meth:`_query.Query.join` + methods, which feature a significant amount of automation beyond :func:`_orm.join`
Explicit use of :func:`_orm.join` + with ORM-enabled SELECT statements involves use of the + :meth:`_sql.Select.select_from` method, as in:: from sqlalchemy.orm import join - session.query(User).\ + stmt = select(User).\ select_from(join(User, Address, User.addresses)).\ filter(Address.email_address=='foo@bar.com') In modern SQLAlchemy the above join can be written more succinctly as:: - session.query(User).\ + stmt = select(User).\ join(User.addresses).\ filter(Address.email_address=='foo@bar.com') - See :meth:`_query.Query.join` for information on modern usage + See :ref:`orm_queryguide_joins` for information on modern usage of ORM level joins. .. deprecated:: 0.8 From 3dfe9b9d70e3bc3c0b23d373da753661bb2fe8f8 Mon Sep 17 00:00:00 2001 From: Alex Marvin Date: Wed, 20 Apr 2022 16:31:38 -0400 Subject: [PATCH 213/632] Fixes minor typo (#7950) (cherry picked from commit 8ee0bf9e373fe98af38babcbc97435c3b505d09f) --- doc/build/core/engines.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/core/engines.rst b/doc/build/core/engines.rst index 0a6e7b3dc13..1f60ae6253f 100644 --- a/doc/build/core/engines.rst +++ b/doc/build/core/engines.rst @@ -52,7 +52,7 @@ See the section :ref:`dialect_toplevel` for information on the various backends .. _database_urls: -Database Urls +Database URLs ============= The :func:`_sa.create_engine` function produces an :class:`_engine.Engine` object based From b41c3e7f56234b9873bbb547339b66fdcd10fd95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20Gr=C3=B6nholm?= Date: Mon, 18 Apr 2022 13:07:19 -0400 Subject: [PATCH 214/632] Implement UUID.python_type MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implemented :attr:`_postgresql.UUID.python_type` attribute for the :class:`_postgresql.UUID` type object. The attribute will return either ``str`` or ``uuid.UUID`` based on the :paramref:`_postgresql.UUID.as_uuid` parameter setting. Previously, this attribute was unimplemented. 
Pull request courtesy Alex Grönholm. Fixes: #7943 Closes: #7944 Change-Id: Ic4fbaeee134d586b08339801968e787cc7e14285 (cherry picked from commit 408c936c77c6aaeceab0e0b001ed745ceb9d19d4) --- doc/build/changelog/unreleased_14/7943.rst | 9 +++++++++ lib/sqlalchemy/dialects/postgresql/base.py | 4 ++++ test/dialect/postgresql/test_types.py | 4 ++++ 3 files changed, 17 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7943.rst diff --git a/doc/build/changelog/unreleased_14/7943.rst b/doc/build/changelog/unreleased_14/7943.rst new file mode 100644 index 00000000000..e5ed12e7f8f --- /dev/null +++ b/doc/build/changelog/unreleased_14/7943.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, postgresql + :tickets: 7943 + + Implemented :attr:`_postgresql.UUID.python_type` attribute for the + :class:`_postgresql.UUID` type object. The attribute will return either + ``str`` or ``uuid.UUID`` based on the :paramref:`_postgresql.UUID.as_uuid` + parameter setting. Previously, this attribute was unimplemented. Pull + request courtesy Alex Grönholm. 
\ No newline at end of file diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index bbc64cf710a..7ba996a4a2d 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -1824,6 +1824,10 @@ def process(value): return process + @property + def python_type(self): + return _python_UUID if self.as_uuid else str + PGUuid = UUID diff --git a/test/dialect/postgresql/test_types.py b/test/dialect/postgresql/test_types.py index 8ec345d170a..fe396726270 100644 --- a/test/dialect/postgresql/test_types.py +++ b/test/dialect/postgresql/test_types.py @@ -2832,6 +2832,10 @@ def test_uuid_literal(self, datatype, value1, connection): ) eq_(v1.fetchone()[0], value1) + def test_python_type(self): + eq_(postgresql.UUID(as_uuid=True).python_type, uuid.UUID) + eq_(postgresql.UUID(as_uuid=False).python_type, str) + class HStoreTest(AssertsCompiledSQL, fixtures.TestBase): __dialect__ = "postgresql" From f1a409ecdd5f0377b9c00a859ab14e9005e873da Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 21 Apr 2022 13:27:16 -0400 Subject: [PATCH 215/632] warn for result.columns() method A warning is emitted when calling upon the :meth:`.Result.columns` method with only one index, in particular ORM related cases, indicating that the current behavior of :meth:`.Result.columns` is broken in this case and will be changed in 2.0. To receive a collection of scalar values, use the :meth:`.Result.scalars` method. 
Fixes: #7953 Change-Id: I3c4ca3eecc2bfc85ad1c38000e5990d6dde80d22 (cherry picked from commit fe2045fb1c767436ed1e32359fe005dabead504a) --- doc/build/changelog/unreleased_14/7953.rst | 10 ++++++ lib/sqlalchemy/engine/result.py | 9 +++++ test/base/test_result.py | 40 +++++++++++++++++++++- 3 files changed, 58 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/7953.rst diff --git a/doc/build/changelog/unreleased_14/7953.rst b/doc/build/changelog/unreleased_14/7953.rst new file mode 100644 index 00000000000..d4e5037dcd8 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7953.rst @@ -0,0 +1,10 @@ +.. change:: + :tags: bug, engine + :tickets: 7953 + + A warning is emitted when calling upon the :meth:`.Result.columns` method + with only one index, in particular ORM related cases, indicating that the + current behavior of :meth:`.Result.columns` is broken in this case and + will be changed in 2.0. To receive a collection of scalar values, + use the :meth:`.Result.scalars` method. + diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py index f8006ac53df..cb6906f0368 100644 --- a/lib/sqlalchemy/engine/result.py +++ b/lib/sqlalchemy/engine/result.py @@ -648,6 +648,15 @@ def _column_slices(self, indexes): real_result = self._real_result if self._real_result else self if real_result._source_supports_scalars and len(indexes) == 1: + util.warn_deprecated( + "The Result.columns() method has a bug in SQLAlchemy 1.4 that " + "is causing it to yield scalar values, rather than Row " + "objects, in the case where a single index is passed and the " + "result is against ORM mapped objects. In SQLAlchemy 2.0, " + "Result will continue yield Row objects in this scenario. 
" + "Use the Result.scalars() method to yield scalar values.", + "2.0", + ) self._generate_rows = False else: self._generate_rows = True diff --git a/test/base/test_result.py b/test/base/test_result.py index 8c9eb398e15..76156db1c38 100644 --- a/test/base/test_result.py +++ b/test/base/test_result.py @@ -5,6 +5,7 @@ from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_deprecated from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_false from sqlalchemy.testing import is_true @@ -1054,12 +1055,49 @@ def test_scalar_mode_columns0_mapping(self, no_tuple_fixture): metadata, no_tuple_fixture, source_supports_scalars=True ) - r = r.columns(0).mappings() + with expect_deprecated( + r"The Result.columns\(\) method has a bug in SQLAlchemy 1.4 " + r"that is causing it to yield scalar values" + ): + r = r.columns(0).mappings() eq_( list(r), [{"a": 1}, {"a": 2}, {"a": 1}, {"a": 1}, {"a": 4}], ) + def test_scalar_mode_columns0_plain(self, no_tuple_fixture): + """test #7953""" + + metadata = result.SimpleResultMetaData(["a", "b", "c"]) + + r = result.ChunkedIteratorResult( + metadata, no_tuple_fixture, source_supports_scalars=True + ) + + with expect_deprecated( + r"The Result.columns\(\) method has a bug in SQLAlchemy 1.4 " + r"that is causing it to yield scalar values" + ): + r = r.columns(0) + eq_( + list(r), + [1, 2, 1, 1, 4], + # [(1,), (2,), (1,), (1,), (4,)], # correct result + ) + + def test_scalar_mode_scalars0(self, no_tuple_fixture): + metadata = result.SimpleResultMetaData(["a", "b", "c"]) + + r = result.ChunkedIteratorResult( + metadata, no_tuple_fixture, source_supports_scalars=True + ) + + r = r.scalars(0) + eq_( + list(r), + [1, 2, 1, 1, 4], + ) + def test_scalar_mode_but_accessed_nonscalar_result(self, no_tuple_fixture): metadata = result.SimpleResultMetaData(["a", "b", "c"]) From 3daa7c905cf03223655db10b951c5c3511b156df Mon 
Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 22 Apr 2022 10:27:38 -0400 Subject: [PATCH 216/632] fix memory leak in resultproxy.c the error raised for non-existent attribute didn't free the "name" string, causing a leak. Fixed a memory leak in the C extensions which could occur when calling upon named members of :class:`.Row` when the member does not exist under Python 3; in particular this could occur during numpy transformations when it attempts to call members such as ``.__array__``, but the issue was surrounding any ``AttributeError`` thrown by the :class:`.Row` object. This issue does not apply to version 2.0 which has already transitioned to Cython. Thanks much to Sebastian Berg for identifying the problem. Fixes: #7875 Change-Id: I444026a877ea1473a5ffac592c7f36ed6f4b563e --- doc/build/changelog/unreleased_14/7875.rst | 12 ++++++++++++ lib/sqlalchemy/cextension/resultproxy.c | 1 + 2 files changed, 13 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7875.rst diff --git a/doc/build/changelog/unreleased_14/7875.rst b/doc/build/changelog/unreleased_14/7875.rst new file mode 100644 index 00000000000..a56fa801060 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7875.rst @@ -0,0 +1,12 @@ +.. change:: + :tags: bug, engine + :tickets: 7875 + + Fixed a memory leak in the C extensions which could occur when calling upon + named members of :class:`.Row` when the member does not exist under Python + 3; in particular this could occur during numpy transformations when it + attempts to call members such as ``.__array__``, but the issue was + surrounding any ``AttributeError`` thrown by the :class:`.Row` object. This + issue does not apply to version 2.0 which has already transitioned to + Cython. Thanks much to Sebastian Berg for identifying the problem. 
+ diff --git a/lib/sqlalchemy/cextension/resultproxy.c b/lib/sqlalchemy/cextension/resultproxy.c index c071ff31731..785ad7e807d 100644 --- a/lib/sqlalchemy/cextension/resultproxy.c +++ b/lib/sqlalchemy/cextension/resultproxy.c @@ -541,6 +541,7 @@ BaseRow_getattro(BaseRow *self, PyObject *name) "Could not locate column in row for column '%.200s'", PyBytes_AS_STRING(err_bytes) ); + Py_DECREF(err_bytes); #else PyErr_Format( PyExc_AttributeError, From eb7061ea7d133eb3154a825595ef31df47f1ced2 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 22 Apr 2022 10:57:00 -0400 Subject: [PATCH 217/632] properly type array element in any() / all() Fixed bug in :class:`.ARRAY` datatype in combination with :class:`.Enum` on PostgreSQL where using the ``.any()`` method to render SQL ANY(), given members of the Python enumeration as arguments, would produce a type adaptation failure on all drivers. Fixes: #6515 Change-Id: Ia1e3b4e10aaf264ed436ce6030d105fc60023433 (cherry picked from commit d023c8e1c7ad82fb249fab5155eb83dee17a160c) --- doc/build/changelog/unreleased_14/6515.rst | 8 +++++ lib/sqlalchemy/sql/sqltypes.py | 20 +++++++++-- test/dialect/postgresql/test_compiler.py | 32 ++++++++--------- test/dialect/postgresql/test_types.py | 42 +++++++++++++++++----- test/sql/test_operators.py | 16 ++++----- 5 files changed, 84 insertions(+), 34 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/6515.rst diff --git a/doc/build/changelog/unreleased_14/6515.rst b/doc/build/changelog/unreleased_14/6515.rst new file mode 100644 index 00000000000..0ac5332b552 --- /dev/null +++ b/doc/build/changelog/unreleased_14/6515.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, postgresql + :tickets: 6515 + + Fixed bug in :class:`.ARRAY` datatype in combination with :class:`.Enum` on + PostgreSQL where using the ``.any()`` method to render SQL ANY(), given + members of the Python enumeration as arguments, would produce a type + adaptation failure on all drivers. 
diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index e51397da7f0..92aaf1c57dc 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ -2851,10 +2851,18 @@ def any(self, other, operator=None): elements = util.preloaded.sql_elements operator = operator if operator else operators.eq + arr_type = self.type + # send plain BinaryExpression so that negate remains at None, # leading to NOT expr for negation. return elements.BinaryExpression( - coercions.expect(roles.ExpressionElementRole, other), + coercions.expect( + roles.BinaryElementRole, + element=other, + operator=operator, + expr=self.expr, + bindparam_type=arr_type.item_type, + ), elements.CollectionAggregate._create_any(self.expr), operator, ) @@ -2895,10 +2903,18 @@ def all(self, other, operator=None): elements = util.preloaded.sql_elements operator = operator if operator else operators.eq + arr_type = self.type + # send plain BinaryExpression so that negate remains at None, # leading to NOT expr for negation. 
return elements.BinaryExpression( - coercions.expect(roles.ExpressionElementRole, other), + coercions.expect( + roles.BinaryElementRole, + element=other, + operator=operator, + expr=self.expr, + bindparam_type=arr_type.item_type, + ), elements.CollectionAggregate._create_all(self.expr), operator, ) diff --git a/test/dialect/postgresql/test_compiler.py b/test/dialect/postgresql/test_compiler.py index 49ab15261e6..6bd2f2fa2be 100644 --- a/test/dialect/postgresql/test_compiler.py +++ b/test/dialect/postgresql/test_compiler.py @@ -1523,48 +1523,48 @@ def test_array(self): ) self.assert_compile( postgresql.Any(4, c), - "%(param_1)s = ANY (x)", - checkparams={"param_1": 4}, + "%(x_1)s = ANY (x)", + checkparams={"x_1": 4}, ) self.assert_compile( c.any(5), - "%(param_1)s = ANY (x)", - checkparams={"param_1": 5}, + "%(x_1)s = ANY (x)", + checkparams={"x_1": 5}, ) self.assert_compile( ~c.any(5), - "NOT (%(param_1)s = ANY (x))", - checkparams={"param_1": 5}, + "NOT (%(x_1)s = ANY (x))", + checkparams={"x_1": 5}, ) self.assert_compile( c.all(5), - "%(param_1)s = ALL (x)", - checkparams={"param_1": 5}, + "%(x_1)s = ALL (x)", + checkparams={"x_1": 5}, ) self.assert_compile( ~c.all(5), - "NOT (%(param_1)s = ALL (x))", - checkparams={"param_1": 5}, + "NOT (%(x_1)s = ALL (x))", + checkparams={"x_1": 5}, ) self.assert_compile( c.any(5, operator=operators.ne), - "%(param_1)s != ANY (x)", - checkparams={"param_1": 5}, + "%(x_1)s != ANY (x)", + checkparams={"x_1": 5}, ) self.assert_compile( postgresql.All(6, c, operator=operators.gt), - "%(param_1)s > ALL (x)", - checkparams={"param_1": 6}, + "%(x_1)s > ALL (x)", + checkparams={"x_1": 6}, ) self.assert_compile( c.all(7, operator=operators.lt), - "%(param_1)s < ALL (x)", - checkparams={"param_1": 7}, + "%(x_1)s < ALL (x)", + checkparams={"x_1": 7}, ) @testing.combinations( diff --git a/test/dialect/postgresql/test_types.py b/test/dialect/postgresql/test_types.py index fe396726270..ad0fcfeeea3 100644 --- 
a/test/dialect/postgresql/test_types.py +++ b/test/dialect/postgresql/test_types.py @@ -1261,16 +1261,16 @@ def test_array_any(self): col = column("x", postgresql.ARRAY(Integer)) self.assert_compile( select(col.any(7, operator=operators.lt)), - "SELECT %(param_1)s < ANY (x) AS anon_1", - checkparams={"param_1": 7}, + "SELECT %(x_1)s < ANY (x) AS anon_1", + checkparams={"x_1": 7}, ) def test_array_all(self): col = column("x", postgresql.ARRAY(Integer)) self.assert_compile( select(col.all(7, operator=operators.lt)), - "SELECT %(param_1)s < ALL (x) AS anon_1", - checkparams={"param_1": 7}, + "SELECT %(x_1)s < ALL (x) AS anon_1", + checkparams={"x_1": 7}, ) def test_array_contains(self): @@ -2397,14 +2397,19 @@ class MyEnum(Enum): array_cls(enum_cls(MyEnum)), ), ) + data = [ + {"enum_col": ["foo"], "pyenum_col": [MyEnum.a, MyEnum.b]}, + {"enum_col": ["foo", "bar"], "pyenum_col": [MyEnum.b]}, + ] else: MyEnum = None + data = [ + {"enum_col": ["foo"]}, + {"enum_col": ["foo", "bar"]}, + ] metadata.create_all(connection) - connection.execute( - tbl.insert(), - [{"enum_col": ["foo"]}, {"enum_col": ["foo", "bar"]}], - ) + connection.execute(tbl.insert(), data) return tbl, MyEnum yield go @@ -2421,6 +2426,27 @@ def _enum_combinations(fn): )(fn) ) + @testing.requires.python3 + @_enum_combinations + @testing.combinations("all", "any", argnames="fn") + def test_any_all_roundtrip( + self, array_of_enum_fixture, connection, array_cls, enum_cls, fn + ): + """test #6515""" + + tbl, MyEnum = array_of_enum_fixture(array_cls, enum_cls) + + if fn == "all": + expr = tbl.c.pyenum_col.all(MyEnum.b) + result = [([MyEnum.b],)] + elif fn == "any": + expr = tbl.c.pyenum_col.any(MyEnum.b) + result = [([MyEnum.a, MyEnum.b],), ([MyEnum.b],)] + else: + assert False + sel = select(tbl.c.pyenum_col).where(expr).order_by(tbl.c.id) + eq_(connection.execute(sel).fetchall(), result) + @_enum_combinations def test_array_of_enums_roundtrip( self, array_of_enum_fixture, connection, array_cls, enum_cls diff 
--git a/test/sql/test_operators.py b/test/sql/test_operators.py index 4eff872f4f3..c524b0aeaa9 100644 --- a/test/sql/test_operators.py +++ b/test/sql/test_operators.py @@ -3544,8 +3544,8 @@ def test_any_array_comparator_accessor(self, t_fixture): self.assert_compile( t.c.arrval.any(5, operator.gt), - ":param_1 > ANY (tab1.arrval)", - checkparams={"param_1": 5}, + ":arrval_1 > ANY (tab1.arrval)", + checkparams={"arrval_1": 5}, ) def test_any_array_comparator_negate_accessor(self, t_fixture): @@ -3553,8 +3553,8 @@ def test_any_array_comparator_negate_accessor(self, t_fixture): self.assert_compile( ~t.c.arrval.any(5, operator.gt), - "NOT (:param_1 > ANY (tab1.arrval))", - checkparams={"param_1": 5}, + "NOT (:arrval_1 > ANY (tab1.arrval))", + checkparams={"arrval_1": 5}, ) def test_all_array_comparator_accessor(self, t_fixture): @@ -3562,8 +3562,8 @@ def test_all_array_comparator_accessor(self, t_fixture): self.assert_compile( t.c.arrval.all(5, operator.gt), - ":param_1 > ALL (tab1.arrval)", - checkparams={"param_1": 5}, + ":arrval_1 > ALL (tab1.arrval)", + checkparams={"arrval_1": 5}, ) def test_all_array_comparator_negate_accessor(self, t_fixture): @@ -3571,8 +3571,8 @@ def test_all_array_comparator_negate_accessor(self, t_fixture): self.assert_compile( ~t.c.arrval.all(5, operator.gt), - "NOT (:param_1 > ALL (tab1.arrval))", - checkparams={"param_1": 5}, + "NOT (:arrval_1 > ALL (tab1.arrval))", + checkparams={"arrval_1": 5}, ) def test_any_array_expression(self, t_fixture): From e32937fa6a7dcc3d5087aa1f41049373ab9e4038 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 24 Apr 2022 16:19:16 -0400 Subject: [PATCH 218/632] backport 6f02d5edd88fe247 to 1.4 in 6f02d5edd88fe2475629438b0730181a2b00c5fe some cleanup to ForeignKey repaired the use case of ForeignKey objects referring to table name alone, by adding more robust column resolution logic. 
This change also fixes an issue where the "referred column" naming convention key uses the resolved referred column earlier than usual when a ForeignKey is setting up its constraint. Fixed bug where :class:`.ForeignKeyConstraint` naming conventions using the ``referred_column_0`` naming convention key would not work if the foreign key constraint were set up as a :class:`.ForeignKey` object rather than an explicit :class:`.ForeignKeyConstraint` object. As this change makes use of a backport of some fixes from version 2.0, an additional little-known feature that has likely been broken for many years is also fixed which is that a :class:`.ForeignKey` object may refer to a referred table by name of the table alone without using a column name, if the name of the referent column is the same as that of the referred column. The ``referred_column_0`` naming convention key was not previously not tested with the :class:`.ForeignKey` object, only :class:`.ForeignKeyConstraint`, and this bug reveals that the feature has never worked correctly unless :class:`.ForeignKeyConstraint` is used for all FK constraints. This bug traces back to the original introduction of the feature introduced for :ticket:`3989`. Fixes: #7958 Change-Id: I230d43e9deba5dff889b9e7fee6cd4d3aa2496d3 --- doc/build/changelog/unreleased_14/7958.rst | 20 +++++++ lib/sqlalchemy/sql/schema.py | 38 +++++++----- test/sql/test_metadata.py | 68 +++++++++++++++++++++- 3 files changed, 109 insertions(+), 17 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7958.rst diff --git a/doc/build/changelog/unreleased_14/7958.rst b/doc/build/changelog/unreleased_14/7958.rst new file mode 100644 index 00000000000..057647bd876 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7958.rst @@ -0,0 +1,20 @@ +.. 
change:: + :tags: bug, schema + :tickets: 7958 + + Fixed bug where :class:`.ForeignKeyConstraint` naming conventions using the + ``referred_column_0`` naming convention key would not work if the foreign + key constraint were set up as a :class:`.ForeignKey` object rather than an + explicit :class:`.ForeignKeyConstraint` object. As this change makes use of + a backport of some fixes from version 2.0, an additional little-known + feature that has likely been broken for many years is also fixed which is + that a :class:`.ForeignKey` object may refer to a referred table by name of + the table alone without using a column name, if the name of the referent + column is the same as that of the referred column. + + The ``referred_column_0`` naming convention key was not previously not + tested with the :class:`.ForeignKey` object, only + :class:`.ForeignKeyConstraint`, and this bug reveals that the feature has + never worked correctly unless :class:`.ForeignKeyConstraint` is used for + all FK constraints. This bug traces back to the original introduction of + the feature introduced for :ticket:`3989`. diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py index d91e4e1661d..aa904fcf5a8 100644 --- a/lib/sqlalchemy/sql/schema.py +++ b/lib/sqlalchemy/sql/schema.py @@ -2433,10 +2433,6 @@ def _resolve_col_tokens(self): return parenttable, tablekey, colname def _link_to_col_by_colstring(self, parenttable, table, colname): - if not hasattr(self.constraint, "_referred_table"): - self.constraint._referred_table = table - else: - assert self.constraint._referred_table is table _column = None if colname is None: @@ -2444,8 +2440,12 @@ def _link_to_col_by_colstring(self, parenttable, table, colname): # was specified as table name only, in which case we # match the column name to the same column on the # parent. 
- key = self.parent - _column = table.c.get(self.parent.key, None) + # this use case wasn't working in later 1.x series + # as it had no test coverage; fixed in 2.0 + parent = self.parent + assert parent is not None + key = parent.key + _column = table.c.get(key, None) elif self.link_to_name: key = colname for c in table.c: @@ -2465,10 +2465,10 @@ def _link_to_col_by_colstring(self, parenttable, table, colname): key, ) - self._set_target_column(_column) + return _column def _set_target_column(self, column): - assert isinstance(self.parent.table, Table) + assert self.parent is not None # propagate TypeEngine to parent if it didn't have one if self.parent.type._isnull: @@ -2518,14 +2518,11 @@ def column(self): "parent MetaData" % parenttable ) else: - raise exc.NoReferencedColumnError( - "Could not initialize target column for " - "ForeignKey '%s' on table '%s': " - "table '%s' has no column named '%s'" - % (self._colspec, parenttable.name, tablekey, colname), - tablekey, - colname, + table = parenttable.metadata.tables[tablekey] + return self._link_to_col_by_colstring( + parenttable, table, colname ) + elif hasattr(self._colspec, "__clause_element__"): _column = self._colspec.__clause_element__() return _column @@ -2545,6 +2542,11 @@ def _set_parent(self, column, **kw): def _set_remote_table(self, table): parenttable, tablekey, colname = self._resolve_col_tokens() self._link_to_col_by_colstring(parenttable, table, colname) + + _column = self._link_to_col_by_colstring(parenttable, table, colname) + self._set_target_column(_column) + assert self.constraint is not None + self.constraint._validate_dest_table(table) def _remove_from_metadata(self, metadata): @@ -2583,10 +2585,14 @@ def _set_table(self, column, table): if table_key in parenttable.metadata.tables: table = parenttable.metadata.tables[table_key] try: - self._link_to_col_by_colstring(parenttable, table, colname) + _column = self._link_to_col_by_colstring( + parenttable, table, colname + ) except 
exc.NoReferencedColumnError: # this is OK, we'll try later pass + else: + self._set_target_column(_column) parenttable.metadata._fk_memos[fk_key].append(self) elif hasattr(self._colspec, "__clause_element__"): _column = self._colspec.__clause_element__() diff --git a/test/sql/test_metadata.py b/test/sql/test_metadata.py index 7205c882333..50cf253379f 100644 --- a/test/sql/test_metadata.py +++ b/test/sql/test_metadata.py @@ -761,7 +761,10 @@ def test_assorted_repr(self): "%s" ", name='someconstraint')" % repr(ck.sqltext), ), - (ColumnDefault(("foo", "bar")), "ColumnDefault(('foo', 'bar'))"), + ( + ColumnDefault(("foo", "bar")), + "ColumnDefault(('foo', 'bar'))", + ), ): eq_(repr(const), exp) @@ -919,6 +922,46 @@ def test_col_key_fk_parent(self): a2 = a.to_metadata(m2) assert b2.c.y.references(a2.c.x) + def test_fk_w_no_colname(self): + """test a ForeignKey that refers to table name only. the column + name is assumed to be the same col name on parent table. + + this is a little used feature from long ago that nonetheless is + still in the code. + + The feature was found to be not working but is repaired for + SQLAlchemy 2.0. + + """ + m1 = MetaData() + a = Table("a", m1, Column("x", Integer)) + b = Table("b", m1, Column("x", Integer, ForeignKey("a"))) + assert b.c.x.references(a.c.x) + + m2 = MetaData() + b2 = b.to_metadata(m2) + a2 = a.to_metadata(m2) + assert b2.c.x.references(a2.c.x) + + def test_fk_w_no_colname_name_missing(self): + """test a ForeignKey that refers to table name only. the column + name is assumed to be the same col name on parent table. + + this is a little used feature from long ago that nonetheless is + still in the code. 
+ + """ + m1 = MetaData() + a = Table("a", m1, Column("x", Integer)) + b = Table("b", m1, Column("y", Integer, ForeignKey("a"))) + + with expect_raises_message( + exc.NoReferencedColumnError, + "Could not initialize target column for ForeignKey 'a' on " + "table 'b': table 'a' has no column named 'y'", + ): + assert b.c.y.references(a.c.x) + def test_column_collection_constraint_w_ad_hoc_columns(self): """Test ColumnCollectionConstraint that has columns that aren't part of the Table. @@ -5303,6 +5346,29 @@ def test_fk_attrs(self): a1.append_constraint(fk) eq_(fk.name, "fk_address_user_id_user_id") + @testing.combinations(True, False, argnames="col_has_type") + def test_fk_ref_local_referent_has_no_type(self, col_has_type): + """test #7958""" + + metadata = MetaData( + naming_convention={ + "fk": "fk_%(referred_column_0_name)s", + } + ) + Table("a", metadata, Column("id", Integer, primary_key=True)) + b = Table( + "b", + metadata, + Column("id", Integer, primary_key=True), + Column("aid", ForeignKey("a.id")) + if not col_has_type + else Column("aid", Integer, ForeignKey("a.id")), + ) + fks = list( + c for c in b.constraints if isinstance(c, ForeignKeyConstraint) + ) + eq_(fks[0].name, "fk_id") + def test_custom(self): def key_hash(const, table): return "HASH_%s" % table.name From d0653db2583b7a4bac1b50021c0956dcec740780 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 26 Apr 2022 15:02:37 -0400 Subject: [PATCH 219/632] repair fetch_setting call in mysql pyodbc dialect Fixed a regression in the untested MySQL PyODBC dialect caused by the fix for :ticket:`7518` in version 1.4.32 where an argument was being propagated incorrectly upon first connect, leading to a ``TypeError``. 
Fixes: #7871 Change-Id: I37f8ca8e83cb352ee2a2336b52863858259b1d77 (cherry picked from commit 3deff88fe12adc470792f71da7b9c54a5438638f) --- doc/build/changelog/unreleased_14/7871.rst | 7 +++++++ lib/sqlalchemy/dialects/mysql/pyodbc.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/7871.rst diff --git a/doc/build/changelog/unreleased_14/7871.rst b/doc/build/changelog/unreleased_14/7871.rst new file mode 100644 index 00000000000..e2b8e9769f7 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7871.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: bug, mysql, regression + :tickets: 7871 + + Fixed a regression in the untested MySQL PyODBC dialect caused by the fix + for :ticket:`7518` in version 1.4.32 where an argument was being propagated + incorrectly upon first connect, leading to a ``TypeError``. diff --git a/lib/sqlalchemy/dialects/mysql/pyodbc.py b/lib/sqlalchemy/dialects/mysql/pyodbc.py index 22d60bd1535..aa2190bf46c 100644 --- a/lib/sqlalchemy/dialects/mysql/pyodbc.py +++ b/lib/sqlalchemy/dialects/mysql/pyodbc.py @@ -89,7 +89,7 @@ def _detect_charset(self, connection): # If it's decided that issuing that sort of SQL leaves you SOL, then # this can prefer the driver value. 
try: - value = self._fetch_setting("character_set_client") + value = self._fetch_setting(connection, "character_set_client") if value: return value except exc.DBAPIError: From 0c17ba55e695bd71048d750f0dcc24082338444c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 26 Apr 2022 15:32:30 -0400 Subject: [PATCH 220/632] changelog edits for 1.4.36 Change-Id: I0dd6b8ace355e4884b918484ae7b2c7a8319ff7e --- doc/build/changelog/unreleased_14/6515.rst | 6 +++--- doc/build/changelog/unreleased_14/7875.rst | 2 +- doc/build/changelog/unreleased_14/7930.rst | 7 ++++--- doc/build/changelog/unreleased_14/7936.rst | 8 ++++---- doc/build/changelog/unreleased_14/7937.rst | 7 ++++--- doc/build/changelog/unreleased_14/7943.rst | 8 ++++---- doc/build/changelog/unreleased_14/7953.rst | 15 ++++++++++----- doc/build/changelog/unreleased_14/7958.rst | 12 ++++++------ 8 files changed, 36 insertions(+), 29 deletions(-) diff --git a/doc/build/changelog/unreleased_14/6515.rst b/doc/build/changelog/unreleased_14/6515.rst index 0ac5332b552..7db6a67c7ab 100644 --- a/doc/build/changelog/unreleased_14/6515.rst +++ b/doc/build/changelog/unreleased_14/6515.rst @@ -3,6 +3,6 @@ :tickets: 6515 Fixed bug in :class:`.ARRAY` datatype in combination with :class:`.Enum` on - PostgreSQL where using the ``.any()`` method to render SQL ANY(), given - members of the Python enumeration as arguments, would produce a type - adaptation failure on all drivers. + PostgreSQL where using the ``.any()`` or ``.all()`` methods to render SQL + ANY() or ALL(), given members of the Python enumeration as arguments, would + produce a type adaptation failure on all drivers. 
diff --git a/doc/build/changelog/unreleased_14/7875.rst b/doc/build/changelog/unreleased_14/7875.rst index a56fa801060..3e14f5c7faa 100644 --- a/doc/build/changelog/unreleased_14/7875.rst +++ b/doc/build/changelog/unreleased_14/7875.rst @@ -4,7 +4,7 @@ Fixed a memory leak in the C extensions which could occur when calling upon named members of :class:`.Row` when the member does not exist under Python - 3; in particular this could occur during numpy transformations when it + 3; in particular this could occur during NumPy transformations when it attempts to call members such as ``.__array__``, but the issue was surrounding any ``AttributeError`` thrown by the :class:`.Row` object. This issue does not apply to version 2.0 which has already transitioned to diff --git a/doc/build/changelog/unreleased_14/7930.rst b/doc/build/changelog/unreleased_14/7930.rst index bf4f9988ca2..56a2ef530f5 100644 --- a/doc/build/changelog/unreleased_14/7930.rst +++ b/doc/build/changelog/unreleased_14/7930.rst @@ -2,6 +2,7 @@ :tags: bug, postgresql :tickets: 7930 - Fixed an issue what would cause autocommit mode to be reset - when using pre_ping in conjunction engine level autocommit - on the psycopg2 driver. + Fixed an issue in the psycopg2 dialect when using the + :paramref:`.create_engine.pool_pre_ping` parameter which would cause + user-configured ``AUTOCOMMIT`` isolation level to be inadvertently reset by + the "ping" handler. 
diff --git a/doc/build/changelog/unreleased_14/7936.rst b/doc/build/changelog/unreleased_14/7936.rst index bcad142b0bc..48c63328e58 100644 --- a/doc/build/changelog/unreleased_14/7936.rst +++ b/doc/build/changelog/unreleased_14/7936.rst @@ -2,10 +2,10 @@ :tags: bug, orm, regression :tickets: 7936 - Fixed regression where the change in #7861, released in version 1.4.33, - that brought the :class:`.Insert` construct to be partially recognized as - an ORM-enabled statement did not properly transfer the correct mapper / - mapped table state to the :class:`.Session`, causing the + Fixed regression where the change made for :ticket:`7861`, released in + version 1.4.33, that brought the :class:`.Insert` construct to be partially + recognized as an ORM-enabled statement did not properly transfer the + correct mapper / mapped table state to the :class:`.Session`, causing the :meth:`.Session.get_bind` method to fail for a :class:`.Session` that was bound to engines and/or connections using the :paramref:`.Session.binds` parameter. diff --git a/doc/build/changelog/unreleased_14/7937.rst b/doc/build/changelog/unreleased_14/7937.rst index 96d80d6cd27..50dd00aa854 100644 --- a/doc/build/changelog/unreleased_14/7937.rst +++ b/doc/build/changelog/unreleased_14/7937.rst @@ -2,7 +2,8 @@ :tags: bug, asyncio :tickets: 7937 - Allow setting contextvar values inside async adapted event handlers. - Previously the value set to the contextvar would not be properly - propagated. + Repaired handling of ``contextvar.ContextVar`` objects inside of async + adapted event handlers. Previously, values applied to a ``ContextVar`` + would not be propagated in the specific case of calling upon awaitables + inside of non-awaitable code. 
diff --git a/doc/build/changelog/unreleased_14/7943.rst b/doc/build/changelog/unreleased_14/7943.rst index e5ed12e7f8f..87fef1c21a0 100644 --- a/doc/build/changelog/unreleased_14/7943.rst +++ b/doc/build/changelog/unreleased_14/7943.rst @@ -3,7 +3,7 @@ :tickets: 7943 Implemented :attr:`_postgresql.UUID.python_type` attribute for the - :class:`_postgresql.UUID` type object. The attribute will return either - ``str`` or ``uuid.UUID`` based on the :paramref:`_postgresql.UUID.as_uuid` - parameter setting. Previously, this attribute was unimplemented. Pull - request courtesy Alex Grönholm. \ No newline at end of file + PostgreSQL :class:`_postgresql.UUID` type object. The attribute will return + either ``str`` or ``uuid.UUID`` based on the + :paramref:`_postgresql.UUID.as_uuid` parameter setting. Previously, this + attribute was unimplemented. Pull request courtesy Alex Grönholm. \ No newline at end of file diff --git a/doc/build/changelog/unreleased_14/7953.rst b/doc/build/changelog/unreleased_14/7953.rst index d4e5037dcd8..31b57d2f1ed 100644 --- a/doc/build/changelog/unreleased_14/7953.rst +++ b/doc/build/changelog/unreleased_14/7953.rst @@ -2,9 +2,14 @@ :tags: bug, engine :tickets: 7953 - A warning is emitted when calling upon the :meth:`.Result.columns` method - with only one index, in particular ORM related cases, indicating that the - current behavior of :meth:`.Result.columns` is broken in this case and - will be changed in 2.0. To receive a collection of scalar values, - use the :meth:`.Result.scalars` method. + Added a warning regarding a bug which exists in the :meth:`.Result.columns` + method when passing 0 for the index in conjunction with a :class:`.Result` + that will return a single ORM entity, which indicates that the current + behavior of :meth:`.Result.columns` is broken in this case as the + :class:`.Result` object will yield scalar values and not :class:`.Row` + objects. 
The issue will be fixed in 2.0, which would be a + backwards-incompatible change for code that relies on the current broken + behavior. Code which wants to receive a collection of scalar values should + use the :meth:`.Result.scalars` method, which will return a new + :class:`.ScalarResult` object that yields non-row scalar objects. diff --git a/doc/build/changelog/unreleased_14/7958.rst b/doc/build/changelog/unreleased_14/7958.rst index 057647bd876..dc9f96a317d 100644 --- a/doc/build/changelog/unreleased_14/7958.rst +++ b/doc/build/changelog/unreleased_14/7958.rst @@ -12,9 +12,9 @@ the table alone without using a column name, if the name of the referent column is the same as that of the referred column. - The ``referred_column_0`` naming convention key was not previously not - tested with the :class:`.ForeignKey` object, only - :class:`.ForeignKeyConstraint`, and this bug reveals that the feature has - never worked correctly unless :class:`.ForeignKeyConstraint` is used for - all FK constraints. This bug traces back to the original introduction of - the feature introduced for :ticket:`3989`. + The ``referred_column_0`` naming convention key was previously not tested + with the :class:`.ForeignKey` object, only :class:`.ForeignKeyConstraint`, + and this bug reveals that the feature has never worked correctly unless + :class:`.ForeignKeyConstraint` is used for all FK constraints. This bug + traces back to the original introduction of the feature introduced for + :ticket:`3989`. 
From 9bb5d8fed9b87bdbbbd7b3e34aa1f5b06faff29e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 26 Apr 2022 16:56:04 -0400 Subject: [PATCH 221/632] - 1.4.36 --- doc/build/changelog/changelog_14.rst | 134 ++++++++++++++++++++- doc/build/changelog/unreleased_14/6515.rst | 8 -- doc/build/changelog/unreleased_14/7871.rst | 7 -- doc/build/changelog/unreleased_14/7875.rst | 12 -- doc/build/changelog/unreleased_14/7900.rst | 14 --- doc/build/changelog/unreleased_14/7919.rst | 8 -- doc/build/changelog/unreleased_14/7930.rst | 8 -- doc/build/changelog/unreleased_14/7936.rst | 11 -- doc/build/changelog/unreleased_14/7937.rst | 9 -- doc/build/changelog/unreleased_14/7943.rst | 9 -- doc/build/changelog/unreleased_14/7953.rst | 15 --- doc/build/changelog/unreleased_14/7958.rst | 20 --- doc/build/conf.py | 4 +- 13 files changed, 135 insertions(+), 124 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/6515.rst delete mode 100644 doc/build/changelog/unreleased_14/7871.rst delete mode 100644 doc/build/changelog/unreleased_14/7875.rst delete mode 100644 doc/build/changelog/unreleased_14/7900.rst delete mode 100644 doc/build/changelog/unreleased_14/7919.rst delete mode 100644 doc/build/changelog/unreleased_14/7930.rst delete mode 100644 doc/build/changelog/unreleased_14/7936.rst delete mode 100644 doc/build/changelog/unreleased_14/7937.rst delete mode 100644 doc/build/changelog/unreleased_14/7943.rst delete mode 100644 doc/build/changelog/unreleased_14/7953.rst delete mode 100644 doc/build/changelog/unreleased_14/7958.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 79426351e94..87bd45e4ea9 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,139 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.36 - :include_notes_from: unreleased_14 + :released: April 26, 2022 + + .. 
change:: + :tags: bug, mysql, regression + :tickets: 7871 + + Fixed a regression in the untested MySQL PyODBC dialect caused by the fix + for :ticket:`7518` in version 1.4.32 where an argument was being propagated + incorrectly upon first connect, leading to a ``TypeError``. + + .. change:: + :tags: bug, orm, regression + :tickets: 7936 + + Fixed regression where the change made for :ticket:`7861`, released in + version 1.4.33, that brought the :class:`.Insert` construct to be partially + recognized as an ORM-enabled statement did not properly transfer the + correct mapper / mapped table state to the :class:`.Session`, causing the + :meth:`.Session.get_bind` method to fail for a :class:`.Session` that was + bound to engines and/or connections using the :paramref:`.Session.binds` + parameter. + + .. change:: + :tags: bug, engine + :tickets: 7875 + + Fixed a memory leak in the C extensions which could occur when calling upon + named members of :class:`.Row` when the member does not exist under Python + 3; in particular this could occur during NumPy transformations when it + attempts to call members such as ``.__array__``, but the issue was + surrounding any ``AttributeError`` thrown by the :class:`.Row` object. This + issue does not apply to version 2.0 which has already transitioned to + Cython. Thanks much to Sebastian Berg for identifying the problem. + + + .. change:: + :tags: bug, postgresql + :tickets: 6515 + + Fixed bug in :class:`.ARRAY` datatype in combination with :class:`.Enum` on + PostgreSQL where using the ``.any()`` or ``.all()`` methods to render SQL + ANY() or ALL(), given members of the Python enumeration as arguments, would + produce a type adaptation failure on all drivers. + + .. change:: + :tags: bug, postgresql + :tickets: 7943 + + Implemented :attr:`_postgresql.UUID.python_type` attribute for the + PostgreSQL :class:`_postgresql.UUID` type object. 
The attribute will return + either ``str`` or ``uuid.UUID`` based on the + :paramref:`_postgresql.UUID.as_uuid` parameter setting. Previously, this + attribute was unimplemented. Pull request courtesy Alex Grönholm. + + .. change:: + :tags: bug, tests + :tickets: 7919 + + For third party dialects, repaired a missing requirement for the + ``SimpleUpdateDeleteTest`` suite test which was not checking for a working + "rowcount" function on the target dialect. + + + .. change:: + :tags: bug, postgresql + :tickets: 7930 + + Fixed an issue in the psycopg2 dialect when using the + :paramref:`.create_engine.pool_pre_ping` parameter which would cause + user-configured ``AUTOCOMMIT`` isolation level to be inadvertently reset by + the "ping" handler. + + .. change:: + :tags: bug, asyncio + :tickets: 7937 + + Repaired handling of ``contextvar.ContextVar`` objects inside of async + adapted event handlers. Previously, values applied to a ``ContextVar`` + would not be propagated in the specific case of calling upon awaitables + inside of non-awaitable code. + + + .. change:: + :tags: bug, engine + :tickets: 7953 + + Added a warning regarding a bug which exists in the :meth:`.Result.columns` + method when passing 0 for the index in conjunction with a :class:`.Result` + that will return a single ORM entity, which indicates that the current + behavior of :meth:`.Result.columns` is broken in this case as the + :class:`.Result` object will yield scalar values and not :class:`.Row` + objects. The issue will be fixed in 2.0, which would be a + backwards-incompatible change for code that relies on the current broken + behavior. Code which wants to receive a collection of scalar values should + use the :meth:`.Result.scalars` method, which will return a new + :class:`.ScalarResult` object that yields non-row scalar objects. + + + .. 
change:: + :tags: bug, schema + :tickets: 7958 + + Fixed bug where :class:`.ForeignKeyConstraint` naming conventions using the + ``referred_column_0`` naming convention key would not work if the foreign + key constraint were set up as a :class:`.ForeignKey` object rather than an + explicit :class:`.ForeignKeyConstraint` object. As this change makes use of + a backport of some fixes from version 2.0, an additional little-known + feature that has likely been broken for many years is also fixed which is + that a :class:`.ForeignKey` object may refer to a referred table by name of + the table alone without using a column name, if the name of the referent + column is the same as that of the referred column. + + The ``referred_column_0`` naming convention key was previously not tested + with the :class:`.ForeignKey` object, only :class:`.ForeignKeyConstraint`, + and this bug reveals that the feature has never worked correctly unless + :class:`.ForeignKeyConstraint` is used for all FK constraints. This bug + traces back to the original introduction of the feature introduced for + :ticket:`3989`. + + .. change:: + :tags: bug, orm, declarative + :tickets: 7900 + + Modified the :class:`.DeclarativeMeta` metaclass to pass ``cls.__dict__`` + into the declarative scanning process to look for attributes, rather than + the separate dictionary passed to the type's ``__init__()`` method. This + allows user-defined base classes that add attributes within an + ``__init_subclass__()`` to work as expected, as ``__init_subclass__()`` can + only affect the ``cls.__dict__`` itself and not the other dictionary. This + is technically a regression from 1.3 where ``__dict__`` was being used. + + + .. changelog:: :version: 1.4.35 diff --git a/doc/build/changelog/unreleased_14/6515.rst b/doc/build/changelog/unreleased_14/6515.rst deleted file mode 100644 index 7db6a67c7ab..00000000000 --- a/doc/build/changelog/unreleased_14/6515.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. 
change:: - :tags: bug, postgresql - :tickets: 6515 - - Fixed bug in :class:`.ARRAY` datatype in combination with :class:`.Enum` on - PostgreSQL where using the ``.any()`` or ``.all()`` methods to render SQL - ANY() or ALL(), given members of the Python enumeration as arguments, would - produce a type adaptation failure on all drivers. diff --git a/doc/build/changelog/unreleased_14/7871.rst b/doc/build/changelog/unreleased_14/7871.rst deleted file mode 100644 index e2b8e9769f7..00000000000 --- a/doc/build/changelog/unreleased_14/7871.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: bug, mysql, regression - :tickets: 7871 - - Fixed a regression in the untested MySQL PyODBC dialect caused by the fix - for :ticket:`7518` in version 1.4.32 where an argument was being propagated - incorrectly upon first connect, leading to a ``TypeError``. diff --git a/doc/build/changelog/unreleased_14/7875.rst b/doc/build/changelog/unreleased_14/7875.rst deleted file mode 100644 index 3e14f5c7faa..00000000000 --- a/doc/build/changelog/unreleased_14/7875.rst +++ /dev/null @@ -1,12 +0,0 @@ -.. change:: - :tags: bug, engine - :tickets: 7875 - - Fixed a memory leak in the C extensions which could occur when calling upon - named members of :class:`.Row` when the member does not exist under Python - 3; in particular this could occur during NumPy transformations when it - attempts to call members such as ``.__array__``, but the issue was - surrounding any ``AttributeError`` thrown by the :class:`.Row` object. This - issue does not apply to version 2.0 which has already transitioned to - Cython. Thanks much to Sebastian Berg for identifying the problem. - diff --git a/doc/build/changelog/unreleased_14/7900.rst b/doc/build/changelog/unreleased_14/7900.rst deleted file mode 100644 index 9d6d507703c..00000000000 --- a/doc/build/changelog/unreleased_14/7900.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. 
change:: - :tags: bug, orm, declarative - :tickets: 7900 - - Modified the :class:`.DeclarativeMeta` metaclass to pass ``cls.__dict__`` - into the declarative scanning process to look for attributes, rather than - the separate dictionary passed to the type's ``__init__()`` method. This - allows user-defined base classes that add attributes within an - ``__init_subclass__()`` to work as expected, as ``__init_subclass__()`` can - only affect the ``cls.__dict__`` itself and not the other dictionary. This - is technically a regression from 1.3 where ``__dict__`` was being used. - - - diff --git a/doc/build/changelog/unreleased_14/7919.rst b/doc/build/changelog/unreleased_14/7919.rst deleted file mode 100644 index fdba724e8a0..00000000000 --- a/doc/build/changelog/unreleased_14/7919.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, tests - :tickets: 7919 - - For third party dialects, repaired a missing requirement for the - ``SimpleUpdateDeleteTest`` suite test which was not checking for a working - "rowcount" function on the target dialect. - diff --git a/doc/build/changelog/unreleased_14/7930.rst b/doc/build/changelog/unreleased_14/7930.rst deleted file mode 100644 index 56a2ef530f5..00000000000 --- a/doc/build/changelog/unreleased_14/7930.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, postgresql - :tickets: 7930 - - Fixed an issue in the psycopg2 dialect when using the - :paramref:`.create_engine.pool_pre_ping` parameter which would cause - user-configured ``AUTOCOMMIT`` isolation level to be inadvertently reset by - the "ping" handler. diff --git a/doc/build/changelog/unreleased_14/7936.rst b/doc/build/changelog/unreleased_14/7936.rst deleted file mode 100644 index 48c63328e58..00000000000 --- a/doc/build/changelog/unreleased_14/7936.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. 
change:: - :tags: bug, orm, regression - :tickets: 7936 - - Fixed regression where the change made for :ticket:`7861`, released in - version 1.4.33, that brought the :class:`.Insert` construct to be partially - recognized as an ORM-enabled statement did not properly transfer the - correct mapper / mapped table state to the :class:`.Session`, causing the - :meth:`.Session.get_bind` method to fail for a :class:`.Session` that was - bound to engines and/or connections using the :paramref:`.Session.binds` - parameter. diff --git a/doc/build/changelog/unreleased_14/7937.rst b/doc/build/changelog/unreleased_14/7937.rst deleted file mode 100644 index 50dd00aa854..00000000000 --- a/doc/build/changelog/unreleased_14/7937.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, asyncio - :tickets: 7937 - - Repaired handling of ``contextvar.ContextVar`` objects inside of async - adapted event handlers. Previously, values applied to a ``ContextVar`` - would not be propagated in the specific case of calling upon awaitables - inside of non-awaitable code. - diff --git a/doc/build/changelog/unreleased_14/7943.rst b/doc/build/changelog/unreleased_14/7943.rst deleted file mode 100644 index 87fef1c21a0..00000000000 --- a/doc/build/changelog/unreleased_14/7943.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, postgresql - :tickets: 7943 - - Implemented :attr:`_postgresql.UUID.python_type` attribute for the - PostgreSQL :class:`_postgresql.UUID` type object. The attribute will return - either ``str`` or ``uuid.UUID`` based on the - :paramref:`_postgresql.UUID.as_uuid` parameter setting. Previously, this - attribute was unimplemented. Pull request courtesy Alex Grönholm. \ No newline at end of file diff --git a/doc/build/changelog/unreleased_14/7953.rst b/doc/build/changelog/unreleased_14/7953.rst deleted file mode 100644 index 31b57d2f1ed..00000000000 --- a/doc/build/changelog/unreleased_14/7953.rst +++ /dev/null @@ -1,15 +0,0 @@ -.. 
change:: - :tags: bug, engine - :tickets: 7953 - - Added a warning regarding a bug which exists in the :meth:`.Result.columns` - method when passing 0 for the index in conjunction with a :class:`.Result` - that will return a single ORM entity, which indicates that the current - behavior of :meth:`.Result.columns` is broken in this case as the - :class:`.Result` object will yield scalar values and not :class:`.Row` - objects. The issue will be fixed in 2.0, which would be a - backwards-incompatible change for code that relies on the current broken - behavior. Code which wants to receive a collection of scalar values should - use the :meth:`.Result.scalars` method, which will return a new - :class:`.ScalarResult` object that yields non-row scalar objects. - diff --git a/doc/build/changelog/unreleased_14/7958.rst b/doc/build/changelog/unreleased_14/7958.rst deleted file mode 100644 index dc9f96a317d..00000000000 --- a/doc/build/changelog/unreleased_14/7958.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. change:: - :tags: bug, schema - :tickets: 7958 - - Fixed bug where :class:`.ForeignKeyConstraint` naming conventions using the - ``referred_column_0`` naming convention key would not work if the foreign - key constraint were set up as a :class:`.ForeignKey` object rather than an - explicit :class:`.ForeignKeyConstraint` object. As this change makes use of - a backport of some fixes from version 2.0, an additional little-known - feature that has likely been broken for many years is also fixed which is - that a :class:`.ForeignKey` object may refer to a referred table by name of - the table alone without using a column name, if the name of the referent - column is the same as that of the referred column. 
- - The ``referred_column_0`` naming convention key was previously not tested - with the :class:`.ForeignKey` object, only :class:`.ForeignKeyConstraint`, - and this bug reveals that the feature has never worked correctly unless - :class:`.ForeignKeyConstraint` is used for all FK constraints. This bug - traces back to the original introduction of the feature introduced for - :ticket:`3989`. diff --git a/doc/build/conf.py b/doc/build/conf.py index fd10f6a0aaa..ee5d37066c4 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -213,9 +213,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. -release = "1.4.35" +release = "1.4.36" -release_date = "April 6, 2022" +release_date = "April 26, 2022" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From 79db8212262eba52b76bb3247c5b9e5b5b1bb89e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 26 Apr 2022 17:12:08 -0400 Subject: [PATCH 222/632] Version 1.4.37 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 87bd45e4ea9..d8f57915c53 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.37 + :include_notes_from: unreleased_14 + .. 
changelog:: :version: 1.4.36 :released: April 26, 2022 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index 9c6fddf0a8d..0ed1fef0715 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.36" +__version__ = "1.4.37" def __go(lcls): From 55ac391ef92a46556c1579899feef051296acd4b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 1 May 2022 12:28:36 -0400 Subject: [PATCH 223/632] use bindparam_type in BinaryElementImpl._post_coercion if available Fixed an issue where using :func:`.bindparam` with no explicit data or type given could be coerced into the incorrect type when used in expressions such as when using :meth:`.ARRAY.comparator.any` and :meth:`.ARRAY.comparator.all`. Fixes: #7979 Change-Id: If7779e713c9a3a5fee496b66e417cfd3fca5b1f9 (cherry picked from commit 889cbe53121c8fd50c845357dd52b24594346b68) --- doc/build/changelog/unreleased_14/7979.rst | 9 +++++++++ lib/sqlalchemy/sql/coercions.py | 6 ++++-- test/sql/test_operators.py | 19 +++++++++++++++++++ 3 files changed, 32 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7979.rst diff --git a/doc/build/changelog/unreleased_14/7979.rst b/doc/build/changelog/unreleased_14/7979.rst new file mode 100644 index 00000000000..9a82a290979 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7979.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, sql + :tickets: 7979 + + Fixed an issue where using :func:`.bindparam` with no explicit data or type + given could be coerced into the incorrect type when used in expressions + such as when using :meth:`.ARRAY.comparator.any` and + :meth:`.ARRAY.comparator.all`. 
+ diff --git a/lib/sqlalchemy/sql/coercions.py b/lib/sqlalchemy/sql/coercions.py index b3974c3d360..8cc73cb5c5a 100644 --- a/lib/sqlalchemy/sql/coercions.py +++ b/lib/sqlalchemy/sql/coercions.py @@ -524,9 +524,11 @@ def _literal_coercion( except exc.ArgumentError as err: self._raise_for_expected(element, err=err) - def _post_coercion(self, resolved, expr, **kw): + def _post_coercion(self, resolved, expr, bindparam_type=None, **kw): if resolved.type._isnull and not expr.type._isnull: - resolved = resolved._with_binary_element_type(expr.type) + resolved = resolved._with_binary_element_type( + bindparam_type if bindparam_type is not None else expr.type + ) return resolved diff --git a/test/sql/test_operators.py b/test/sql/test_operators.py index c524b0aeaa9..116d6b79232 100644 --- a/test/sql/test_operators.py +++ b/test/sql/test_operators.py @@ -3,6 +3,7 @@ from sqlalchemy import and_ from sqlalchemy import between +from sqlalchemy import bindparam from sqlalchemy import exc from sqlalchemy import Integer from sqlalchemy import join @@ -3539,6 +3540,24 @@ def test_illegal_ops(self, t_fixture): t.c.data + all_(t.c.arrval), "tab1.data + ALL (tab1.arrval)" ) + @testing.combinations("all", "any", argnames="op") + def test_any_all_bindparam_coercion(self, t_fixture, op): + """test #7979""" + t = t_fixture + + if op == "all": + expr = t.c.arrval.all(bindparam("param")) + expected = "%(param)s = ALL (tab1.arrval)" + elif op == "any": + expr = t.c.arrval.any(bindparam("param")) + expected = "%(param)s = ANY (tab1.arrval)" + else: + assert False + + is_(expr.left.type._type_affinity, Integer) + + self.assert_compile(expr, expected, dialect="postgresql") + def test_any_array_comparator_accessor(self, t_fixture): t = t_fixture From 17f33b7d45af0f310db34cfe2c629f3d26dd0796 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 3 May 2022 08:58:27 -0400 Subject: [PATCH 224/632] bypass pyodbc default server version / set charset Further adjustments to the MySQL PyODBC dialect to 
allow for complete connectivity, which was previously still not working despite fixes in :ticket:`7871`. Fixes: #7966 Change-Id: I549ea9e7b6e722e22d3e25bdb2fe0934603e2454 (cherry picked from commit 4abca61b4903e42f9568cc06f3c18ac27a139cf7) --- doc/build/changelog/unreleased_14/7966.rst | 7 +++++++ lib/sqlalchemy/dialects/mysql/pyodbc.py | 6 ++++++ 2 files changed, 13 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7966.rst diff --git a/doc/build/changelog/unreleased_14/7966.rst b/doc/build/changelog/unreleased_14/7966.rst new file mode 100644 index 00000000000..b07baec4532 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7966.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: bug, mysql + :tickets: 7966 + + Further adjustments to the MySQL PyODBC dialect to allow for complete + connectivity, which was previously still not working despite fixes in + :ticket:`7871`. diff --git a/lib/sqlalchemy/dialects/mysql/pyodbc.py b/lib/sqlalchemy/dialects/mysql/pyodbc.py index aa2190bf46c..bfa61f6480f 100644 --- a/lib/sqlalchemy/dialects/mysql/pyodbc.py +++ b/lib/sqlalchemy/dialects/mysql/pyodbc.py @@ -88,6 +88,9 @@ def _detect_charset(self, connection): # # If it's decided that issuing that sort of SQL leaves you SOL, then # this can prefer the driver value. 
+ + # set this to None as _fetch_setting attempts to use it (None is OK) + self._connection_charset = None try: value = self._fetch_setting(connection, "character_set_client") if value: @@ -101,6 +104,9 @@ def _detect_charset(self, connection): ) return "latin1" + def _get_server_version_info(self, connection): + return MySQLDialect._get_server_version_info(self, connection) + def _extract_error_code(self, exception): m = re.compile(r"\((\d+)\)").search(str(exception.args)) c = m.group(1) From c5ca81697feedb2739453ff6c5bc6ec76ed58eb7 Mon Sep 17 00:00:00 2001 From: khashashin Date: Tue, 3 May 2022 22:00:59 +0200 Subject: [PATCH 225/632] docs(types) Fix missing import from sqlalchemy (#7978) * docs(types) Fix missing import from sqlalchemy The sample code is missing the import of Enum from sqlalchemy, which might confuse the reader, since we are using another enum type from Python itself here. So it makes sense to clarify that here. * fix whitespaces Change-Id: I019bbed8a7278f60e7239160ea4c99ecd2519d3b Co-authored-by: Federico Caselli (cherry picked from commit f89a202d60215b13e3733a3ea950473962b3cf67) --- lib/sqlalchemy/sql/sqltypes.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index 92aaf1c57dc..306ac397df3 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ -1270,6 +1270,8 @@ class Enum(Emulated, String, SchemaType): a plain-string enumerated type:: import enum + from sqlalchemy import Enum + class MyEnum(enum.Enum): one = 1 two = 2 From e6c92d2622d28130a773bc5d27449c8630335bce Mon Sep 17 00:00:00 2001 From: dzcode <9089037+dzcode@users.noreply.github.com> Date: Tue, 3 May 2022 14:18:43 -0600 Subject: [PATCH 226/632] Use tuple instead of raw url in string formatting (#7987) * Fixes: #7902 - Use tuple instead of raw url in string formatting * Fix lint error (cherry picked from commit 675c3e17f7fcccb7534c46adb56529fc3ddd8dbf) --- 
lib/sqlalchemy/testing/provision.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/lib/sqlalchemy/testing/provision.py b/lib/sqlalchemy/testing/provision.py index a911ba69cee..90c4d93cfc4 100644 --- a/lib/sqlalchemy/testing/provision.py +++ b/lib/sqlalchemy/testing/provision.py @@ -287,13 +287,15 @@ def create_db(cfg, eng, ident): Used when a test run will employ multiple processes, e.g., when run via `tox` or `pytest -n4`. """ - raise NotImplementedError("no DB creation routine for cfg: %s" % eng.url) + raise NotImplementedError( + "no DB creation routine for cfg: %s" % (eng.url,) + ) @register.init def drop_db(cfg, eng, ident): """Drop a database that we dynamically created for testing.""" - raise NotImplementedError("no DB drop routine for cfg: %s" % eng.url) + raise NotImplementedError("no DB drop routine for cfg: %s" % (eng.url,)) @register.init @@ -377,7 +379,7 @@ def temp_table_keyword_args(cfg, eng): ComponentReflectionTest class in suite/test_reflection.py """ raise NotImplementedError( - "no temp table keyword args routine for cfg: %s" % eng.url + "no temp table keyword args routine for cfg: %s" % (eng.url,) ) From 1bfbe942c3c128137196b18c9fbaa294eefd9b87 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 9 May 2022 10:27:51 -0400 Subject: [PATCH 227/632] dont use the label convention for memoized entities Fixed issue where ORM results would apply incorrect key names to the returned :class:`.Row` objects in the case where the set of columns to be selected were changed, such as when using :meth:`.Select.with_only_columns`. 
Fixes: #8001 Change-Id: If3a2a5d00d15ebc2e9d41494845cfb3b06f80dcc (cherry picked from commit 319f09ffced3f655e7d500b3a9965e19468fd9d9) --- doc/build/changelog/unreleased_14/8001.rst | 8 ++++ lib/sqlalchemy/orm/context.py | 48 +++++++++++++++++----- test/orm/test_query.py | 43 ++++++++----------- 3 files changed, 63 insertions(+), 36 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8001.rst diff --git a/doc/build/changelog/unreleased_14/8001.rst b/doc/build/changelog/unreleased_14/8001.rst new file mode 100644 index 00000000000..aa8251445a4 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8001.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, orm + :tickets: 8001 + + Fixed issue where ORM results would apply incorrect key names to the + returned :class:`.Row` objects in the case where the set of columns to be + selected were changed, such as when using + :meth:`.Select.with_only_columns`. diff --git a/lib/sqlalchemy/orm/context.py b/lib/sqlalchemy/orm/context.py index 49d354cb3d2..2e3066db937 100644 --- a/lib/sqlalchemy/orm/context.py +++ b/lib/sqlalchemy/orm/context.py @@ -676,10 +676,6 @@ def create_for_statement(cls, statement, compiler, **kw): else: self.label_style = self.select_statement._label_style - self._label_convention = self._column_naming_convention( - statement._label_style, self.use_legacy_query_style - ) - if select_statement._memoized_select_entities: self._memoized_entities = { memoized_entities: _QueryEntity.to_compile_state( @@ -693,6 +689,14 @@ def create_for_statement(cls, statement, compiler, **kw): ) } + # label_convention is stateful and will yield deduping keys if it + # sees the same key twice. 
therefore it's important that it is not + # invoked for the above "memoized" entities that aren't actually + # in the columns clause + self._label_convention = self._column_naming_convention( + statement._label_style, self.use_legacy_query_style + ) + _QueryEntity.to_compile_state( self, select_statement._raw_columns, @@ -2444,11 +2448,15 @@ def to_compile_state( entity._select_iterable, entities_collection, idx, + is_current_entities, ) else: if entity._annotations.get("bundle", False): _BundleEntity( - compile_state, entity, entities_collection + compile_state, + entity, + entities_collection, + is_current_entities, ) elif entity._is_clause_list: # this is legacy only - test_composites.py @@ -2458,10 +2466,15 @@ def to_compile_state( entity._select_iterable, entities_collection, idx, + is_current_entities, ) else: _ColumnEntity._for_columns( - compile_state, [entity], entities_collection, idx + compile_state, + [entity], + entities_collection, + idx, + is_current_entities, ) elif entity.is_bundle: _BundleEntity(compile_state, entity, entities_collection) @@ -2666,6 +2679,7 @@ def __init__( compile_state, expr, entities_collection, + is_current_entities, setup_entities=True, parent_bundle=None, ): @@ -2696,6 +2710,7 @@ def __init__( compile_state, expr, entities_collection, + is_current_entities, parent_bundle=self, ) elif isinstance(expr, Bundle): @@ -2703,6 +2718,7 @@ def __init__( compile_state, expr, entities_collection, + is_current_entities, parent_bundle=self, ) else: @@ -2711,6 +2727,7 @@ def __init__( [expr], entities_collection, None, + is_current_entities, parent_bundle=self, ) @@ -2784,6 +2801,7 @@ def _for_columns( columns, entities_collection, raw_column_index, + is_current_entities, parent_bundle=None, ): for column in columns: @@ -2803,6 +2821,7 @@ def _for_columns( entities_collection, _entity, raw_column_index, + is_current_entities, parent_bundle=parent_bundle, ) else: @@ -2812,6 +2831,7 @@ def _for_columns( entities_collection, _entity, 
raw_column_index, + is_current_entities, parent_bundle=parent_bundle, ) else: @@ -2820,6 +2840,7 @@ def _for_columns( column, entities_collection, raw_column_index, + is_current_entities, parent_bundle=parent_bundle, ) @@ -2910,12 +2931,14 @@ def __init__( column, entities_collection, raw_column_index, + is_current_entities, parent_bundle=None, ): self.expr = column self.raw_column_index = raw_column_index self.translate_raw_column = raw_column_index is not None - if column._is_text_clause: + + if not is_current_entities or column._is_text_clause: self._label_name = None else: self._label_name = compile_state._label_convention(column) @@ -2974,6 +2997,7 @@ def __init__( entities_collection, parententity, raw_column_index, + is_current_entities, parent_bundle=None, ): annotations = column._annotations @@ -3000,9 +3024,13 @@ def __init__( self.translate_raw_column = raw_column_index is not None self.raw_column_index = raw_column_index - self._label_name = compile_state._label_convention( - column, col_name=orm_key - ) + + if is_current_entities: + self._label_name = compile_state._label_convention( + column, col_name=orm_key + ) + else: + self._label_name = None _entity._post_inspect self.entity_zero = self.entity_zero_or_selectable = ezero = _entity diff --git a/test/orm/test_query.py b/test/orm/test_query.py index 6d9aee584af..0539e6fe658 100644 --- a/test/orm/test_query.py +++ b/test/orm/test_query.py @@ -44,7 +44,6 @@ from sqlalchemy.orm import attributes from sqlalchemy.orm import backref from sqlalchemy.orm import Bundle -from sqlalchemy.orm import clear_mappers from sqlalchemy.orm import column_property from sqlalchemy.orm import contains_eager from sqlalchemy.orm import defer @@ -861,6 +860,18 @@ def test_explicit_cols( assert_row_keys(stmt, expected, coreorm_exec) + def test_with_only_columns(self, assert_row_keys): + """test #8001""" + + User, Address = self.classes("User", "Address") + + stmt = select(User.id, Address.email_address).join_from(User, 
Address) + stmt = stmt.with_only_columns( + stmt.selected_columns.id, stmt.selected_columns.email_address + ) + + assert_row_keys(stmt, ["id", "email_address"], "orm") + def test_explicit_cols_legacy(self): User = self.classes.User @@ -1005,34 +1016,14 @@ def test_explicit_ambiguous_orm_cols_legacy(self): eq_(row._mapping.keys(), ["id", "name", "id", "name"]) @testing.fixture - def uname_fixture(self): + def uname_fixture(self, registry): class Foo(object): pass - if False: - # this conditional creates the table each time which would - # eliminate cross-test memoization issues. if the tests - # are failing without this then there's a memoization issue. - # check AnnotatedColumn memoized keys - m = MetaData() - users = Table( - "users", - m, - Column("id", Integer, primary_key=True), - Column( - "name", - String, - ), - ) - self.mapper_registry.map_imperatively( - Foo, users, properties={"uname": users.c.name} - ) - else: - users = self.tables.users - clear_mappers() - self.mapper_registry.map_imperatively( - Foo, users, properties={"uname": users.c.name} - ) + users = self.tables.users + registry.map_imperatively( + Foo, users, properties={"uname": users.c.name} + ) return Foo From 7e578de06e7b8da48143460ad19de1d0e8cbe6dd Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 10 May 2022 10:32:18 -0400 Subject: [PATCH 228/632] bump zimports to 0.6.0 new multiprocessing support Change-Id: I165a419a67c4e4a5e49d15cf2ac5c8aa46d16cbc (cherry picked from commit 9294ac7fe02eb2f8aad78122eae199192f12ef52) --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2be64772dd0..94ac2c6876b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -9,7 +9,7 @@ repos: - click < 8.1 - repo: https://github.com/sqlalchemyorg/zimports - rev: v0.5.0 + rev: v0.6.0 hooks: - id: zimports From 48ccecbe69795753baac1ca6d351c1af5ebcdd7b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 
13 May 2022 09:24:09 -0400 Subject: [PATCH 229/632] search for pickle failures this is not for merge, trying to locate what might be the problem at #8000 Change-Id: I3ee7bf85f42eca861d32fc402b69796d518934d1 --- lib/sqlalchemy/testing/pickleable.py | 8 ++++++++ test/orm/_fixtures.py | 17 +++++++++++++++++ test/orm/test_selectin_relations.py | 19 +++++++++++++++++-- 3 files changed, 42 insertions(+), 2 deletions(-) diff --git a/lib/sqlalchemy/testing/pickleable.py b/lib/sqlalchemy/testing/pickleable.py index 04405e53974..fb02157780e 100644 --- a/lib/sqlalchemy/testing/pickleable.py +++ b/lib/sqlalchemy/testing/pickleable.py @@ -20,6 +20,14 @@ class Order(fixtures.ComparableEntity): pass +class Item(fixtures.ComparableEntity): + pass + + +class Keyword(fixtures.ComparableEntity): + pass + + class Dingaling(fixtures.ComparableEntity): pass diff --git a/test/orm/_fixtures.py b/test/orm/_fixtures.py index 6715cb7feae..64a86ce7e59 100644 --- a/test/orm/_fixtures.py +++ b/test/orm/_fixtures.py @@ -396,6 +396,23 @@ def static(self): return CannedResults(self) +class PickleFixtureMixin(object): + @classmethod + def setup_classes(cls): + from sqlalchemy.testing import pickleable + + cls.classes.update( + { + "User": pickleable.User, + "Order": pickleable.Order, + "Address": pickleable.Address, + "Item": pickleable.Item, + "Keyword": pickleable.Keyword, + "Dingaling": pickleable.Dingaling, + } + ) + + class CannedResults(object): """Built on demand, instances use mappers in effect at time of call.""" diff --git a/test/orm/test_selectin_relations.py b/test/orm/test_selectin_relations.py index 3e44abe88f5..5df1c00e64e 100644 --- a/test/orm/test_selectin_relations.py +++ b/test/orm/test_selectin_relations.py @@ -1,3 +1,5 @@ +import pickle + import sqlalchemy as sa from sqlalchemy import bindparam from sqlalchemy import ForeignKey @@ -43,7 +45,11 @@ from .inheritance._poly_fixtures import Person -class EagerTest(_fixtures.FixtureTest, testing.AssertsCompiledSQL): +class EagerTest( 
+ _fixtures.PickleFixtureMixin, + _fixtures.FixtureTest, + testing.AssertsCompiledSQL, +): run_inserts = "once" run_deletes = None @@ -759,8 +765,17 @@ def _do_query_tests(self, opts, count): with fixture_session() as sess: def go(): + result = ( + sess.query(User).options(*opts).order_by(User.id).all() + ) + eq_( + result, + self.static.user_item_keyword_result, + ) + + print(f"pickling User structure based on: {opts}") eq_( - sess.query(User).options(*opts).order_by(User.id).all(), + pickle.loads(pickle.dumps(result)), self.static.user_item_keyword_result, ) From 3538289c3289f1de6174a2f7a3b842361bc11434 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 13 May 2022 10:25:37 -0400 Subject: [PATCH 230/632] more expire_on_commit reminders the session commit/close docs still feel awkward in how one learns about this operation. hopefully another pass over 2.0 can make things more linear. removed a 1.4 note about autobegin that was completely inaccurate; commit() does autobegin so it has an effect, just not usually on the database. Change-Id: Iaa4b96bd3df6cf82e851b2943322ddad7abbbac0 (cherry picked from commit cd628fad7c92f5f54bf1bf6985fd983269b0ec19) --- doc/build/orm/session_basics.rst | 45 +++++++++++++------- doc/build/tutorial/orm_data_manipulation.rst | 23 ++++++++++ lib/sqlalchemy/orm/session.py | 20 ++++++++- 3 files changed, 70 insertions(+), 18 deletions(-) diff --git a/doc/build/orm/session_basics.rst b/doc/build/orm/session_basics.rst index dd2a868daa5..bed901712d5 100644 --- a/doc/build/orm/session_basics.rst +++ b/doc/build/orm/session_basics.rst @@ -82,6 +82,18 @@ persisted to the database. If we were only issuing SELECT calls and did not need to write any changes, then the call to :meth:`_orm.Session.commit` would be unnecessary. +.. 
note:: + + Note that after :meth:`_orm.Session.commit` is called, either explicitly or + when using a context manager, all objects associated with the + :class:`.Session` are :term:`expired`, meaning their contents are erased to + be re-loaded within the next transaction. If these objects are instead + :term:`detached`, they will be non-functional until re-associated with a + new :class:`.Session`, unless the :paramref:`.Session.expire_on_commit` + parameter is used to disable this behavior. See the + section :ref:`session_committing` for more detail. + + .. _session_begin_commit_rollback_block: Framing out a begin / commit / rollback block @@ -771,13 +783,23 @@ Committing ---------- :meth:`~.Session.commit` is used to commit the current -transaction, if any. When there is no transaction in place, the method -passes silently. - -When :meth:`_orm.Session.commit` operates upon the current open transaction, -it first always issues :meth:`~.Session.flush` -beforehand to flush any remaining state to the database; this is independent -of the "autoflush" setting. +transaction. At its core this indicates that it emits ``COMMIT`` on +all current database connections that have a transaction in progress; +from a :term:`DBAPI` perspective this means the ``connection.commit()`` +DBAPI method is invoked on each DBAPI connection. + +When there is no transaction in place for the :class:`.Session`, indicating +that no operations were invoked on this :class:`.Session` since the previous +call to :meth:`.Session.commit`, the method will begin and commit an +internal-only "logical" transaction, that does not normally affect the database +unless pending flush changes were detected, but will still invoke event +handlers and object expiration rules. + +The :meth:`_orm.Session.commit` operation unconditionally issues +:meth:`~.Session.flush` before emitting COMMIT on relevant database +connections. If no pending changes are detected, then no SQL is emitted to the +database. 
This behavior is not configurable and is not affected by the +:paramref:`.Session.autoflush` parameter. Subsequent to that, :meth:`_orm.Session.commit` will then COMMIT the actual database transaction or transactions, if any, that are in place. @@ -789,15 +811,6 @@ result of a SELECT, they receive the most recent state. This behavior may be controlled by the :paramref:`_orm.Session.expire_on_commit` flag, which may be set to ``False`` when this behavior is undesirable. -.. versionchanged:: 1.4 - - The :class:`_orm.Session` object now features deferred "begin" behavior, as - described in :ref:`autobegin `. If no transaction is - begun, methods like :meth:`_orm.Session.commit` and - :meth:`_orm.Session.rollback` have no effect. This behavior would not - have been observed prior to 1.4 as under non-autocommit mode, a - transaction would always be implicitly present. - .. seealso:: :ref:`session_autobegin` diff --git a/doc/build/tutorial/orm_data_manipulation.rst b/doc/build/tutorial/orm_data_manipulation.rst index 740880567f4..1ee5e95fa95 100644 --- a/doc/build/tutorial/orm_data_manipulation.rst +++ b/doc/build/tutorial/orm_data_manipulation.rst @@ -214,6 +214,28 @@ behaviors and features: >>> session.commit() COMMIT +The above operation will commit the transaction that was in progress. The +objects which we've dealt with are still :term:`attached` to the :class:`.Session`, +which is a state they stay in until the :class:`.Session` is closed +(which is introduced at :ref:`tutorial_orm_closing`). + + +.. tip:: + + An important thing to note is that attributes on the objects that we just + worked with have been :term:`expired`, meaning, when we next access any + attributes on them, the :class:`.Session` will start a new transaction and + re-load their state. 
This option is sometimes problematic for both + performance reasons, or if one wishes to use the objects after closing the + :class:`.Session` (which is known as the :term:`detached` state), as they + will not have any state and will have no :class:`.Session` with which to load + that state, leading to "detached instance" errors. The behavior is + controllable using a parameter called :paramref:`.Session.expire_on_commit`. + More on this is at :ref:`tutorial_orm_closing`. + + + + .. _tutorial_orm_updating: Updating ORM Objects @@ -510,6 +532,7 @@ and of course the database data is present again as well: [...] ('patrick',){stop} True +.. _tutorial_orm_closing: Closing a Session ------------------ diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index 315a1254fdc..c6a91693e30 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -1401,8 +1401,22 @@ def rollback(self): def commit(self): """Flush pending changes and commit the current transaction. - If no transaction is in progress, the method will first - "autobegin" a new transaction and commit. + When the COMMIT operation is complete, all objects are fully + :term:`expired`, erasing their internal contents, which will be + automatically re-loaded when the objects are next accessed. In the + interim, these objects are in an expired state and will not function if + they are :term:`detached` from the :class:`.Session`. Additionally, + this re-load operation is not supported when using asyncio-oriented + APIs. The :paramref:`.Session.expire_on_commit` parameter may be used + to disable this behavior. 
+ + When there is no transaction in place for the :class:`.Session`, + indicating that no operations were invoked on this :class:`.Session` + since the previous call to :meth:`.Session.commit`, the method will + begin and commit an internal-only "logical" transaction, that does not + normally affect the database unless pending flush changes were + detected, but will still invoke event handlers and object expiration + rules. If :term:`1.x-style` use is in effect and there are currently SAVEPOINTs in progress via :meth:`_orm.Session.begin_nested`, @@ -1427,6 +1441,8 @@ def commit(self): :ref:`unitofwork_transaction` + :ref:`asyncio_orm_avoid_lazyloads` + """ if self._transaction is None: if not self._autobegin(): From c0692f64a7e4aa805b0df11a5bd82ccc33e125dd Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 13 May 2022 10:53:07 -0400 Subject: [PATCH 231/632] Revert "search for pickle failures" This reverts commit 48ccecbe69795753baac1ca6d351c1af5ebcdd7b. not being careful w/ the commandline as I eat breakfast today --- lib/sqlalchemy/testing/pickleable.py | 8 -------- test/orm/_fixtures.py | 17 ----------------- test/orm/test_selectin_relations.py | 19 ++----------------- 3 files changed, 2 insertions(+), 42 deletions(-) diff --git a/lib/sqlalchemy/testing/pickleable.py b/lib/sqlalchemy/testing/pickleable.py index fb02157780e..04405e53974 100644 --- a/lib/sqlalchemy/testing/pickleable.py +++ b/lib/sqlalchemy/testing/pickleable.py @@ -20,14 +20,6 @@ class Order(fixtures.ComparableEntity): pass -class Item(fixtures.ComparableEntity): - pass - - -class Keyword(fixtures.ComparableEntity): - pass - - class Dingaling(fixtures.ComparableEntity): pass diff --git a/test/orm/_fixtures.py b/test/orm/_fixtures.py index 64a86ce7e59..6715cb7feae 100644 --- a/test/orm/_fixtures.py +++ b/test/orm/_fixtures.py @@ -396,23 +396,6 @@ def static(self): return CannedResults(self) -class PickleFixtureMixin(object): - @classmethod - def setup_classes(cls): - from sqlalchemy.testing 
import pickleable - - cls.classes.update( - { - "User": pickleable.User, - "Order": pickleable.Order, - "Address": pickleable.Address, - "Item": pickleable.Item, - "Keyword": pickleable.Keyword, - "Dingaling": pickleable.Dingaling, - } - ) - - class CannedResults(object): """Built on demand, instances use mappers in effect at time of call.""" diff --git a/test/orm/test_selectin_relations.py b/test/orm/test_selectin_relations.py index 5df1c00e64e..3e44abe88f5 100644 --- a/test/orm/test_selectin_relations.py +++ b/test/orm/test_selectin_relations.py @@ -1,5 +1,3 @@ -import pickle - import sqlalchemy as sa from sqlalchemy import bindparam from sqlalchemy import ForeignKey @@ -45,11 +43,7 @@ from .inheritance._poly_fixtures import Person -class EagerTest( - _fixtures.PickleFixtureMixin, - _fixtures.FixtureTest, - testing.AssertsCompiledSQL, -): +class EagerTest(_fixtures.FixtureTest, testing.AssertsCompiledSQL): run_inserts = "once" run_deletes = None @@ -765,17 +759,8 @@ def _do_query_tests(self, opts, count): with fixture_session() as sess: def go(): - result = ( - sess.query(User).options(*opts).order_by(User.id).all() - ) - eq_( - result, - self.static.user_item_keyword_result, - ) - - print(f"pickling User structure based on: {opts}") eq_( - pickle.loads(pickle.dumps(result)), + sess.query(User).options(*opts).order_by(User.id).all(), self.static.user_item_keyword_result, ) From 067102a304012ce6afd0097627d5717994930488 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 14 May 2022 10:25:53 -0400 Subject: [PATCH 232/632] adjust log stacklevel for py3.11.0b1; enable greenlet Fixed issue where support for logging "stacklevel" implemented in :ticket:`7612` required adjustment to work with recently released Python 3.11.0b1, also repairs the unit tests which tested this feature. Install greenlet from a py311 compat patch. 
re: the stacklevel thing, this is going to be very inconvenient if we have to keep hardcoding numbers everywhere for every new python version Change-Id: I0c8f7293e98c0ca5cc544538284bfd1d3020cb1f References: https://github.com/python-greenlet/greenlet/issues/288 Fixes: #8019 (cherry picked from commit 43ff5b82dc0d91cacd625ac8943622ab340958c5) --- doc/build/changelog/unreleased_14/8019.rst | 7 +++++++ lib/sqlalchemy/engine/base.py | 8 ++++---- lib/sqlalchemy/log.py | 16 ++++++++++++++-- lib/sqlalchemy/testing/requirements.py | 4 ++++ lib/sqlalchemy/util/__init__.py | 1 + lib/sqlalchemy/util/compat.py | 1 + test/ext/asyncio/test_engine_py3k.py | 2 ++ test/requirements.py | 2 +- tox.ini | 4 ++++ 9 files changed, 38 insertions(+), 7 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8019.rst diff --git a/doc/build/changelog/unreleased_14/8019.rst b/doc/build/changelog/unreleased_14/8019.rst new file mode 100644 index 00000000000..854703bceaa --- /dev/null +++ b/doc/build/changelog/unreleased_14/8019.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: bug, engine, tests + :tickets: 8019 + + Fixed issue where support for logging "stacklevel" implemented in + :ticket:`7612` required adjustment to work with recently released Python + 3.11.0b1, also repairs the unit tests which tested this feature. 
diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index eca4a9e10aa..1507e159ed6 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -133,8 +133,8 @@ def _log_info(self, message, *arg, **kw): if fmt: message = fmt(message) - if util.py38: - kw["stacklevel"] = 2 + if log.STACKLEVEL: + kw["stacklevel"] = 1 + log.STACKLEVEL_OFFSET self.engine.logger.info(message, *arg, **kw) @@ -144,8 +144,8 @@ def _log_debug(self, message, *arg, **kw): if fmt: message = fmt(message) - if util.py38: - kw["stacklevel"] = 2 + if log.STACKLEVEL: + kw["stacklevel"] = 1 + log.STACKLEVEL_OFFSET self.engine.logger.debug(message, *arg, **kw) diff --git a/lib/sqlalchemy/log.py b/lib/sqlalchemy/log.py index 07c5eff287c..cc662ecf996 100644 --- a/lib/sqlalchemy/log.py +++ b/lib/sqlalchemy/log.py @@ -21,8 +21,18 @@ import logging import sys +from .util import py311 from .util import py38 +if py38: + STACKLEVEL = True + # needed as of py3.11.0b1 + # #8019 + STACKLEVEL_OFFSET = 2 if py311 else 1 +else: + STACKLEVEL = False + STACKLEVEL_OFFSET = 0 + # set initial level to WARN. This so that # log statements don't occur in the absence of explicit # logging being enabled for 'sqlalchemy'. 
@@ -161,8 +171,10 @@ def log(self, level, msg, *args, **kwargs): selected_level = self.logger.getEffectiveLevel() if level >= selected_level: - if py38: - kwargs["stacklevel"] = kwargs.get("stacklevel", 1) + 1 + if STACKLEVEL: + kwargs["stacklevel"] = ( + kwargs.get("stacklevel", 1) + STACKLEVEL_OFFSET + ) self.logger._log(level, msg, args, **kwargs) diff --git a/lib/sqlalchemy/testing/requirements.py b/lib/sqlalchemy/testing/requirements.py index 49e3cefb413..b3f7ddb502e 100644 --- a/lib/sqlalchemy/testing/requirements.py +++ b/lib/sqlalchemy/testing/requirements.py @@ -1382,6 +1382,10 @@ def async_dialect(self): return exclusions.closed() + @property + def asyncio(self): + return self.greenlet + @property def greenlet(self): def go(config): diff --git a/lib/sqlalchemy/util/__init__.py b/lib/sqlalchemy/util/__init__.py index 497edb3b172..33427e3b504 100644 --- a/lib/sqlalchemy/util/__init__.py +++ b/lib/sqlalchemy/util/__init__.py @@ -77,6 +77,7 @@ from .compat import pickle from .compat import print_ from .compat import py2k +from .compat import py311 from .compat import py37 from .compat import py38 from .compat import py39 diff --git a/lib/sqlalchemy/util/compat.py b/lib/sqlalchemy/util/compat.py index c60d8842147..21a9491f8e6 100644 --- a/lib/sqlalchemy/util/compat.py +++ b/lib/sqlalchemy/util/compat.py @@ -14,6 +14,7 @@ import platform import sys +py311 = sys.version_info >= (3, 11) py39 = sys.version_info >= (3, 9) py38 = sys.version_info >= (3, 8) py37 = sys.version_info >= (3, 7) diff --git a/test/ext/asyncio/test_engine_py3k.py b/test/ext/asyncio/test_engine_py3k.py index 9340f0828dd..d8d9e702113 100644 --- a/test/ext/asyncio/test_engine_py3k.py +++ b/test/ext/asyncio/test_engine_py3k.py @@ -983,6 +983,8 @@ async def test_scalars(self, async_engine, filter_): class TextSyncDBAPI(fixtures.TestBase): + __requires__ = ("asyncio",) + def test_sync_dbapi_raises(self): with expect_raises_message( exc.InvalidRequestError, diff --git a/test/requirements.py 
b/test/requirements.py index 4c9ac40c54d..dda8fd6cbb7 100644 --- a/test/requirements.py +++ b/test/requirements.py @@ -1439,7 +1439,7 @@ def check_range_types(config): def async_dialect(self): """dialect makes use of await_() to invoke operations on the DBAPI.""" - return only_on( + return self.asyncio + only_on( LambdaPredicate( lambda config: config.db.dialect.is_async, "Async dialect required", diff --git a/tox.ini b/tox.ini index 2000351716b..43eb04d8174 100644 --- a/tox.ini +++ b/tox.ini @@ -21,6 +21,10 @@ deps= pytest-xdist mock; python_version < '3.3' + # cython and greenlet both not working on 3.11 + # note cython not working for 3.11 at all right now + git+https://github.com/sqlalchemyorg/greenlet/@fix_py311_cpp#egg=greenlet; python_version >= '3.11' + sqlite: .[aiosqlite] sqlite_file: .[aiosqlite] sqlite_file: .[sqlcipher]; python_version >= '3' and python_version < '3.10' From 405f6afaaa8177726428b6738f5fd331341cc74e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 13 May 2022 15:43:53 -0400 Subject: [PATCH 233/632] raise for same param name in expanding + non expanding An informative error is raised if two individual :class:`.BindParameter` objects share the same name, yet one is used within an "expanding" context (typically an IN expression) and the other is not; mixing the same name in these two different styles of usage is not supported and typically the ``expanding=True`` parameter should be set on the parameters that are to receive list values outside of IN expressions (where ``expanding`` is set by default). 
Fixes: #8018 Change-Id: Ie707f29680eea16b9e421af93560ac1958e11a54 (cherry picked from commit f9fccdeeb6749d10aeec458f1a549906d58ddad8) --- doc/build/changelog/unreleased_14/8018.rst | 11 +++++++++++ lib/sqlalchemy/sql/compiler.py | 9 +++++++++ test/sql/test_compiler.py | 19 +++++++++++++++++++ 3 files changed, 39 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/8018.rst diff --git a/doc/build/changelog/unreleased_14/8018.rst b/doc/build/changelog/unreleased_14/8018.rst new file mode 100644 index 00000000000..c4aae3eeac1 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8018.rst @@ -0,0 +1,11 @@ +.. change:: + :tags: bug, sql + :tickets: 8018 + + An informative error is raised if two individual :class:`.BindParameter` + objects share the same name, yet one is used within an "expanding" context + (typically an IN expression) and the other is not; mixing the same name in + these two different styles of usage is not supported and typically the + ``expanding=True`` parameter should be set on the parameters that are to + receive list values outside of IN expressions (where ``expanding`` is set + by default). diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 7393629a406..bc2d657fb51 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -2460,6 +2460,15 @@ def visit_bindparam( "Bind parameter '%s' conflicts with " "unique bind parameter of the same name" % name ) + elif existing.expanding != bindparam.expanding: + raise exc.CompileError( + "Can't reuse bound parameter name '%s' in both " + "'expanding' (e.g. within an IN expression) and " + "non-expanding contexts. If this parameter is to " + "receive a list/array value, set 'expanding=True' on " + "it for expressions that aren't IN, otherwise use " + "a different parameter name." 
% (name,) + ) elif existing._is_crud or bindparam._is_crud: raise exc.CompileError( "bindparam() name '%s' is reserved " diff --git a/test/sql/test_compiler.py b/test/sql/test_compiler.py index f5f17a35014..33f84142bc8 100644 --- a/test/sql/test_compiler.py +++ b/test/sql/test_compiler.py @@ -3655,6 +3655,25 @@ def test_binds(self): s, ) + def test_expanding_non_expanding_conflict(self): + """test #8018""" + + s = select( + literal("x").in_(bindparam("q")), + bindparam("q"), + ) + + with expect_raises_message( + exc.CompileError, + r"Can't reuse bound parameter name 'q' in both 'expanding' " + r"\(e.g. within an IN expression\) and non-expanding contexts. " + "If this parameter is to " + "receive a list/array value, set 'expanding=True' on " + "it for expressions that aren't IN, otherwise use " + "a different parameter name.", + ): + str(s) + def test_unique_binds_no_clone_collision(self): """test #6824""" bp = bindparam("foo", unique=True) From db861363b7cfa82e5fe8c35fa83c8250d638f280 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 13 May 2022 16:08:34 -0400 Subject: [PATCH 234/632] render col name in on conflict set clause, not given key Fixed bug where the PostgreSQL :meth:`_postgresql.Insert.on_conflict` method and the SQLite :meth:`_sqlite.Insert.on_conflict` method would both fail to correctly accommodate a column with a separate ".key" when specifying the column using its key name in the dictionary passed to ``set_``, as well as if the :attr:`_sqlite.Insert.excluded` or :attr:`_postgresql.Insert.excluded` collection were used as the dictionary directly. 
Fixes: #8014 Change-Id: I67226aeedcb2c683e22405af64720cc1f990f274 (cherry picked from commit 927abc3b33f10464ed04db3d7a454faeb6e729f2) --- doc/build/changelog/unreleased_14/8014.rst | 11 +++ lib/sqlalchemy/dialects/postgresql/base.py | 2 +- lib/sqlalchemy/dialects/sqlite/base.py | 2 +- test/dialect/postgresql/test_compiler.py | 88 ++++++++++++++++++---- test/dialect/test_sqlite.py | 47 +++++++++++- 5 files changed, 134 insertions(+), 16 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8014.rst diff --git a/doc/build/changelog/unreleased_14/8014.rst b/doc/build/changelog/unreleased_14/8014.rst new file mode 100644 index 00000000000..331a9577c58 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8014.rst @@ -0,0 +1,11 @@ +.. change:: + :tags: bug, sql, postgresql, sqlite + :tickets: 8014 + + Fixed bug where the PostgreSQL :meth:`_postgresql.Insert.on_conflict` + method and the SQLite :meth:`_sqlite.Insert.on_conflict` method would both + fail to correctly accommodate a column with a separate ".key" when + specifying the column using its key name in the dictionary passed to + ``set_``, as well as if the :attr:`_sqlite.Insert.excluded` or + :attr:`_postgresql.Insert.excluded` collection were used as the dictionary + directly. 
diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index 7ba996a4a2d..ad2bdf18775 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -2530,7 +2530,7 @@ def visit_on_conflict_do_update(self, on_conflict, **kw): value.type = c.type value_text = self.process(value.self_group(), use_schema=False) - key_text = self.preparer.quote(col_key) + key_text = self.preparer.quote(c.name) action_set_ops.append("%s = %s" % (key_text, value_text)) # check for names that don't match columns diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index 49e4b5c1955..0959d0417cf 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -1385,7 +1385,7 @@ def visit_on_conflict_do_update(self, on_conflict, **kw): value.type = c.type value_text = self.process(value.self_group(), use_schema=False) - key_text = self.preparer.quote(col_key) + key_text = self.preparer.quote(c.name) action_set_ops.append("%s = %s" % (key_text, value_text)) # check for names that don't match columns diff --git a/test/dialect/postgresql/test_compiler.py b/test/dialect/postgresql/test_compiler.py index 6bd2f2fa2be..d85ae9152fd 100644 --- a/test/dialect/postgresql/test_compiler.py +++ b/test/dialect/postgresql/test_compiler.py @@ -2282,41 +2282,103 @@ def test_difficult_update_4(self): ) -class InsertOnConflictTest(fixtures.TestBase, AssertsCompiledSQL): +class InsertOnConflictTest(fixtures.TablesTest, AssertsCompiledSQL): __dialect__ = postgresql.dialect() - def setup_test(self): - self.table1 = table1 = table( + run_create_tables = None + + @classmethod + def define_tables(cls, metadata): + cls.table1 = table1 = table( "mytable", column("myid", Integer), column("name", String(128)), column("description", String(128)), ) - md = MetaData() - self.table_with_metadata = Table( + cls.table_with_metadata = Table( "mytable", - md, + 
metadata, Column("myid", Integer, primary_key=True), Column("name", String(128)), Column("description", String(128)), ) - self.unique_constr = schema.UniqueConstraint( + cls.unique_constr = schema.UniqueConstraint( table1.c.name, name="uq_name" ) - self.excl_constr = ExcludeConstraint( + cls.excl_constr = ExcludeConstraint( (table1.c.name, "="), (table1.c.description, "&&"), name="excl_thing", ) - self.excl_constr_anon = ExcludeConstraint( - (self.table_with_metadata.c.name, "="), - (self.table_with_metadata.c.description, "&&"), - where=self.table_with_metadata.c.description != "foo", + cls.excl_constr_anon = ExcludeConstraint( + (cls.table_with_metadata.c.name, "="), + (cls.table_with_metadata.c.description, "&&"), + where=cls.table_with_metadata.c.description != "foo", ) - self.goofy_index = Index( + cls.goofy_index = Index( "goofy_index", table1.c.name, postgresql_where=table1.c.name > "m" ) + Table( + "users", + metadata, + Column("id", Integer, primary_key=True), + Column("name", String(50)), + ) + + Table( + "users_w_key", + metadata, + Column("id", Integer, primary_key=True), + Column("name", String(50), key="name_keyed"), + ) + + @testing.combinations("control", "excluded", "dict") + def test_set_excluded(self, scenario): + """test #8014, sending all of .excluded to set""" + + if scenario == "control": + users = self.tables.users + + stmt = insert(users) + self.assert_compile( + stmt.on_conflict_do_update( + constraint=users.primary_key, set_=stmt.excluded + ), + "INSERT INTO users (id, name) VALUES (%(id)s, %(name)s) ON " + "CONFLICT (id) DO UPDATE " + "SET id = excluded.id, name = excluded.name", + ) + else: + users_w_key = self.tables.users_w_key + + stmt = insert(users_w_key) + + if scenario == "excluded": + self.assert_compile( + stmt.on_conflict_do_update( + constraint=users_w_key.primary_key, set_=stmt.excluded + ), + "INSERT INTO users_w_key (id, name) " + "VALUES (%(id)s, %(name_keyed)s) ON " + "CONFLICT (id) DO UPDATE " + "SET id = excluded.id, 
name = excluded.name", + ) + else: + self.assert_compile( + stmt.on_conflict_do_update( + constraint=users_w_key.primary_key, + set_={ + "id": stmt.excluded.id, + "name_keyed": stmt.excluded.name_keyed, + }, + ), + "INSERT INTO users_w_key (id, name) " + "VALUES (%(id)s, %(name_keyed)s) ON " + "CONFLICT (id) DO UPDATE " + "SET id = excluded.id, name = excluded.name", + ) + def test_on_conflict_do_no_call_twice(self): users = self.table1 diff --git a/test/dialect/test_sqlite.py b/test/dialect/test_sqlite.py index 6230c7f9459..ff98fea149b 100644 --- a/test/dialect/test_sqlite.py +++ b/test/dialect/test_sqlite.py @@ -2754,7 +2754,7 @@ def test_regexp_replace(self): ) -class OnConflictTest(fixtures.TablesTest): +class OnConflictTest(AssertsCompiledSQL, fixtures.TablesTest): __only_on__ = ("sqlite >= 3.24.0",) __backend__ = True @@ -2768,6 +2768,13 @@ def define_tables(cls, metadata): Column("name", String(50)), ) + Table( + "users_w_key", + metadata, + Column("id", Integer, primary_key=True), + Column("name", String(50), key="name_keyed"), + ) + class SpecialType(sqltypes.TypeDecorator): impl = String cache_ok = True @@ -2812,6 +2819,44 @@ def test_bad_args(self): ValueError, insert(self.tables.users).on_conflict_do_update ) + @testing.combinations("control", "excluded", "dict") + def test_set_excluded(self, scenario): + """test #8014, sending all of .excluded to set""" + + if scenario == "control": + users = self.tables.users + + stmt = insert(users) + self.assert_compile( + stmt.on_conflict_do_update(set_=stmt.excluded), + "INSERT INTO users (id, name) VALUES (?, ?) ON CONFLICT " + "DO UPDATE SET id = excluded.id, name = excluded.name", + ) + else: + users_w_key = self.tables.users_w_key + + stmt = insert(users_w_key) + + if scenario == "excluded": + self.assert_compile( + stmt.on_conflict_do_update(set_=stmt.excluded), + "INSERT INTO users_w_key (id, name) VALUES (?, ?) 
" + "ON CONFLICT " + "DO UPDATE SET id = excluded.id, name = excluded.name", + ) + else: + self.assert_compile( + stmt.on_conflict_do_update( + set_={ + "id": stmt.excluded.id, + "name_keyed": stmt.excluded.name_keyed, + } + ), + "INSERT INTO users_w_key (id, name) VALUES (?, ?) " + "ON CONFLICT " + "DO UPDATE SET id = excluded.id, name = excluded.name", + ) + def test_on_conflict_do_no_call_twice(self): users = self.tables.users From 9b55a423459236ca8a2ced713c9e93999dd18922 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 16 May 2022 10:57:51 -0400 Subject: [PATCH 235/632] fix most sphinx warnings still can't figure out the warnings with some of the older changelog files. Fixes: #7946 Change-Id: Id657ab23008eed0b133fed65b2f9ea75a626215c --- doc/build/changelog/changelog_06.rst | 1 + doc/build/changelog/changelog_08.rst | 1 + doc/build/changelog/changelog_09.rst | 2 +- doc/build/changelog/changelog_11.rst | 3 +- doc/build/changelog/changelog_12.rst | 2 +- doc/build/changelog/changelog_13.rst | 14 ++++----- doc/build/changelog/changelog_14.rst | 36 +++++++++++----------- doc/build/conf.py | 2 +- doc/build/core/metadata.rst | 2 +- doc/build/core/reflection.rst | 2 +- doc/build/dialects/mssql.rst | 12 ++++++-- doc/build/dialects/mysql.rst | 10 ++++++ doc/build/dialects/postgresql.rst | 5 +++ doc/build/glossary.rst | 20 ++++++++++++ doc/build/orm/collections.rst | 2 -- doc/build/orm/contextual.rst | 2 +- doc/build/orm/internals.rst | 1 - doc/build/orm/mapping_styles.rst | 2 +- doc/build/orm/query.rst | 1 + doc/build/orm/queryguide.rst | 6 ++-- lib/sqlalchemy/dialects/postgresql/base.py | 2 +- lib/sqlalchemy/ext/asyncio/session.py | 8 +++-- lib/sqlalchemy/sql/schema.py | 15 +++++---- lib/sqlalchemy/sql/selectable.py | 2 +- lib/sqlalchemy/util/langhelpers.py | 6 +++- 25 files changed, 103 insertions(+), 56 deletions(-) diff --git a/doc/build/changelog/changelog_06.rst b/doc/build/changelog/changelog_06.rst index cd3b32d95b7..739df36b230 100644 --- 
a/doc/build/changelog/changelog_06.rst +++ b/doc/build/changelog/changelog_06.rst @@ -2,6 +2,7 @@ 0.6 Changelog ============= + .. changelog:: :version: 0.6.9 :released: Sat May 05 2012 diff --git a/doc/build/changelog/changelog_08.rst b/doc/build/changelog/changelog_08.rst index 4b6b42ec731..f6be2e3e19c 100644 --- a/doc/build/changelog/changelog_08.rst +++ b/doc/build/changelog/changelog_08.rst @@ -7,6 +7,7 @@ .. include:: changelog_07.rst :start-line: 5 + .. changelog:: :version: 0.8.7 :released: July 22, 2014 diff --git a/doc/build/changelog/changelog_09.rst b/doc/build/changelog/changelog_09.rst index 7ee874e0261..acf1ede9232 100644 --- a/doc/build/changelog/changelog_09.rst +++ b/doc/build/changelog/changelog_09.rst @@ -1920,7 +1920,7 @@ .. change:: :tags: feature, sql - Added :paramref:`.MetaData.reflect.**dialect_kwargs` + Added :paramref:`.MetaData.reflect.dialect_kwargs` to support dialect-level reflection options for all :class:`_schema.Table` objects reflected. diff --git a/doc/build/changelog/changelog_11.rst b/doc/build/changelog/changelog_11.rst index 1988b69b307..c84effc3905 100644 --- a/doc/build/changelog/changelog_11.rst +++ b/doc/build/changelog/changelog_11.rst @@ -20,7 +20,6 @@ :start-line: 5 - .. changelog:: :version: 1.1.18 :released: March 6, 2018 @@ -1076,7 +1075,7 @@ :tickets: 3842 Fixed bug where newly added warning for primary key on insert w/o - autoincrement setting (see :ref:`change_3216`) would fail to emit + autoincrement setting (see :ticket:`3216`) would fail to emit correctly when invoked upon a lower-case :func:`.table` construct. .. 
change:: 3852 diff --git a/doc/build/changelog/changelog_12.rst b/doc/build/changelog/changelog_12.rst index 6dc7d7f8879..b5d331e717b 100644 --- a/doc/build/changelog/changelog_12.rst +++ b/doc/build/changelog/changelog_12.rst @@ -453,7 +453,7 @@ :tickets: 4352 The column conflict resolution technique discussed at - :ref:`declarative_column_conflicts` is now functional for a :class:`_schema.Column` + :ref:`orm_inheritance_column_conflicts` is now functional for a :class:`_schema.Column` that is also a primary key column. Previously, a check for primary key columns declared on a single-inheritance subclass would occur before the column copy were allowed to pass. diff --git a/doc/build/changelog/changelog_13.rst b/doc/build/changelog/changelog_13.rst index 96002c19ee5..00c67ea3bc8 100644 --- a/doc/build/changelog/changelog_13.rst +++ b/doc/build/changelog/changelog_13.rst @@ -950,8 +950,8 @@ :tags: usecase, postgresql :tickets: 5265 - Added support for columns or type :class:`.ARRAY` of :class:`.Enum`, - :class:`.JSON` or :class:`_postgresql.JSONB` in PostgreSQL. + Added support for columns or type :class:`_sqltypes.ARRAY` of :class:`.Enum`, + :class:`_postgresql.JSON` or :class:`_postgresql.JSONB` in PostgreSQL. Previously a workaround was required in these use cases. 
@@ -1002,7 +1002,7 @@ :tickets: 5266 Raise an explicit :class:`.exc.CompileError` when adding a table with a - column of type :class:`.ARRAY` of :class:`.Enum` configured with + column of type :class:`_sqltypes.ARRAY` of :class:`.Enum` configured with :paramref:`.Enum.native_enum` set to ``False`` when :paramref:`.Enum.create_constraint` is not set to ``False`` @@ -1966,13 +1966,13 @@ :class:`_types.JSON` - :meth:`.JSON.Comparator.as_string` + :meth:`_sqltypes.JSON.Comparator.as_string` - :meth:`.JSON.Comparator.as_boolean` + :meth:`_sqltypes.JSON.Comparator.as_boolean` - :meth:`.JSON.Comparator.as_float` + :meth:`_sqltypes.JSON.Comparator.as_float` - :meth:`.JSON.Comparator.as_integer` + :meth:`_sqltypes.JSON.Comparator.as_integer` .. change:: :tags: usecase, oracle diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index d8f57915c53..82022c929d6 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -34,7 +34,7 @@ This document details individual issue-level changes made throughout :tickets: 7936 Fixed regression where the change made for :ticket:`7861`, released in - version 1.4.33, that brought the :class:`.Insert` construct to be partially + version 1.4.33, that brought the :class:`_sql.Insert` construct to be partially recognized as an ORM-enabled statement did not properly transfer the correct mapper / mapped table state to the :class:`.Session`, causing the :meth:`.Session.get_bind` method to fail for a :class:`.Session` that was @@ -58,7 +58,7 @@ This document details individual issue-level changes made throughout :tags: bug, postgresql :tickets: 6515 - Fixed bug in :class:`.ARRAY` datatype in combination with :class:`.Enum` on + Fixed bug in :class:`_sqltypes.ARRAY` datatype in combination with :class:`.Enum` on PostgreSQL where using the ``.any()`` or ``.all()`` methods to render SQL ANY() or ALL(), given members of the Python enumeration as arguments, would produce a type 
adaptation failure on all drivers. @@ -87,7 +87,7 @@ This document details individual issue-level changes made throughout :tickets: 7930 Fixed an issue in the psycopg2 dialect when using the - :paramref:`.create_engine.pool_pre_ping` parameter which would cause + :paramref:`_sa.create_engine.pool_pre_ping` parameter which would cause user-configured ``AUTOCOMMIT`` isolation level to be inadvertently reset by the "ping" handler. @@ -105,15 +105,15 @@ This document details individual issue-level changes made throughout :tags: bug, engine :tickets: 7953 - Added a warning regarding a bug which exists in the :meth:`.Result.columns` - method when passing 0 for the index in conjunction with a :class:`.Result` + Added a warning regarding a bug which exists in the :meth:`_result.Result.columns` + method when passing 0 for the index in conjunction with a :class:`_result.Result` that will return a single ORM entity, which indicates that the current - behavior of :meth:`.Result.columns` is broken in this case as the - :class:`.Result` object will yield scalar values and not :class:`.Row` + behavior of :meth:`_result.Result.columns` is broken in this case as the + :class:`_result.Result` object will yield scalar values and not :class:`.Row` objects. The issue will be fixed in 2.0, which would be a backwards-incompatible change for code that relies on the current broken behavior. Code which wants to receive a collection of scalar values should - use the :meth:`.Result.scalars` method, which will return a new + use the :meth:`_result.Result.scalars` method, which will return a new :class:`.ScalarResult` object that yields non-row scalar objects. 
@@ -199,7 +199,7 @@ This document details individual issue-level changes made throughout :tickets: 7878 Fixed regression caused by :ticket:`7861` where invoking an - :class:`.Insert` construct which contained ORM entities directly via + :class:`_sql.Insert` construct which contained ORM entities directly via :meth:`_orm.Session.execute` would fail. .. change:: @@ -226,8 +226,8 @@ This document details individual issue-level changes made throughout and COMMIT log messages do not actually indicate a real transaction when the AUTOCOMMIT isolation level is in use; messaging has been extended to include the BEGIN message itself, and the messaging has also been fixed to - accommodate when the :class:`.Engine` level - :paramref:`.create_engine.isolation_level` parameter was used directly. + accommodate when the :class:`_engine.Engine` level + :paramref:`_sa.create_engine.isolation_level` parameter was used directly. .. change:: :tags: bug, mssql, regression @@ -314,7 +314,7 @@ This document details individual issue-level changes made throughout :tags: usecase, engine :tickets: 7877, 7815 - Added new parameter :paramref:`.Engine.dispose.close`, defaulting to True. + Added new parameter :paramref:`_engine.Engine.dispose.close`, defaulting to True. When False, the engine disposal does not touch the connections in the old pool at all, simply dropping the pool and replacing it. This use case is so that when the original pool is transferred from a parent process, the @@ -339,7 +339,7 @@ This document details individual issue-level changes made throughout Added new attributes :attr:`.UpdateBase.returning_column_descriptions` and :attr:`.UpdateBase.entity_description` to allow for inspection of ORM - attributes and entities that are installed as part of an :class:`.Insert`, + attributes and entities that are installed as part of an :class:`_sql.Insert`, :class:`.Update`, or :class:`.Delete` construct. 
The :attr:`.Select.column_descriptions` accessor is also now implemented for Core-only selectables. @@ -1638,15 +1638,15 @@ This document details individual issue-level changes made throughout :tags: bug, orm :tickets: 7128 - Fixed bug where iterating a :class:`.Result` from a :class:`_orm.Session` + Fixed bug where iterating a :class:`_result.Result` from a :class:`_orm.Session` after that :class:`_orm.Session` were closed would partially attach objects to that session in an essentially invalid state. It now raises an exception with a link to new documentation if an **un-buffered** result is iterated from a :class:`_orm.Session` that was closed or otherwise had the - :meth:`_orm.Session.expunge_all` method called after that :class:`.Result` + :meth:`_orm.Session.expunge_all` method called after that :class:`_result.Result` was generated. The ``prebuffer_rows`` execution option, as is used automatically by the asyncio extension for client-side result sets, may be - used to produce a :class:`.Result` where the ORM objects are prebuffered, + used to produce a :class:`_result.Result` where the ORM objects are prebuffered, and in this case iterating the result will produce a series of detached objects. @@ -3484,7 +3484,7 @@ This document details individual issue-level changes made throughout :tickets: 6361 Fixed issue where usage of an explicit :class:`.Sequence` would produce - inconsistent "inline" behavior for an :class:`.Insert` construct that + inconsistent "inline" behavior for an :class:`_sql.Insert` construct that includes multiple values phrases; the first seq would be inline but subsequent ones would be "pre-execute", leading to inconsistent sequence ordering. The sequence expressions are now fully inline. 
@@ -4931,7 +4931,7 @@ This document details individual issue-level changes made throughout :tags: bug, engine, sqlite :tickets: 5845 - Fixed bug in the 2.0 "future" version of :class:`.Engine` where emitting + Fixed bug in the 2.0 "future" version of :class:`_engine.Engine` where emitting SQL during the :meth:`.EngineEvents.begin` event hook would cause a re-entrant (recursive) condition due to autobegin, affecting among other things the recipe documented for SQLite to allow for savepoints and diff --git a/doc/build/conf.py b/doc/build/conf.py index ee5d37066c4..8567fed0602 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -106,7 +106,7 @@ changelog_render_changeset = "https://www.sqlalchemy.org/trac/changeset/%s" -exclude_patterns = ["build", "**/unreleased*/*", "*_include.rst"] +exclude_patterns = ["build", "**/unreleased*/*", "**/*_include.rst"] # zzzeeksphinx makes these conversions when it is rendering the # docstrings classes, methods, and functions within the scope of diff --git a/doc/build/core/metadata.rst b/doc/build/core/metadata.rst index 7c5b7dd668d..366f165651b 100644 --- a/doc/build/core/metadata.rst +++ b/doc/build/core/metadata.rst @@ -384,7 +384,7 @@ at once, such as:: :ref:`multipart_schema_names` - describes use of dotted schema names with the SQL Server dialect. - :ref:`schema_table_reflection` + :ref:`metadata_reflection_schemas` .. 
_schema_metadata_schema_name: diff --git a/doc/build/core/reflection.rst b/doc/build/core/reflection.rst index fdcbf8c3970..d9547344e7b 100644 --- a/doc/build/core/reflection.rst +++ b/doc/build/core/reflection.rst @@ -139,7 +139,7 @@ a :class:`_schema.MetaData` object configured with a default schema name >>> metadata_obj = MetaData(schema="project") -The :method:`.MetaData.reflect` will then utilize that configured ``.schema`` +The :meth:`.MetaData.reflect` will then utilize that configured ``.schema`` for reflection:: >>> # uses `schema` configured in metadata_obj diff --git a/doc/build/dialects/mssql.rst b/doc/build/dialects/mssql.rst index f3060e62f6f..7484000dbcc 100644 --- a/doc/build/dialects/mssql.rst +++ b/doc/build/dialects/mssql.rst @@ -29,6 +29,10 @@ they originate from :mod:`sqlalchemy.types` or from the local dialect:: Types which are specific to SQL Server, or have SQL Server-specific construction arguments, are as follows: +.. note: where :noindex: is used, indicates a type that is not redefined + in the dialect module, just imported from sqltypes. this avoids warnings + in the sphinx build + .. currentmodule:: sqlalchemy.dialects.mssql .. autoclass:: BIT @@ -37,6 +41,7 @@ construction arguments, are as follows: .. autoclass:: CHAR :members: __init__ + :noindex: .. autoclass:: DATETIME2 @@ -61,6 +66,7 @@ construction arguments, are as follows: .. autoclass:: NCHAR :members: __init__ + :noindex: .. autoclass:: NTEXT @@ -69,7 +75,7 @@ construction arguments, are as follows: .. autoclass:: NVARCHAR :members: __init__ - + :noindex: .. autoclass:: REAL :members: __init__ @@ -91,7 +97,7 @@ construction arguments, are as follows: .. autoclass:: TEXT :members: __init__ - + :noindex: .. autoclass:: TIME :members: __init__ @@ -110,9 +116,11 @@ construction arguments, are as follows: .. autoclass:: VARBINARY :members: __init__ + :noindex: .. autoclass:: VARCHAR :members: __init__ + :noindex: .. 
autoclass:: XML diff --git a/doc/build/dialects/mysql.rst b/doc/build/dialects/mysql.rst index 9eb7f5a7405..49dbff71bab 100644 --- a/doc/build/dialects/mysql.rst +++ b/doc/build/dialects/mysql.rst @@ -29,6 +29,10 @@ valid with MySQL are importable from the top level dialect:: Types which are specific to MySQL, or have MySQL-specific construction arguments, are as follows: +.. note: where :noindex: is used, indicates a type that is not redefined + in the dialect module, just imported from sqltypes. this avoids warnings + in the sphinx build + .. currentmodule:: sqlalchemy.dialects.mysql .. autoclass:: BIGINT @@ -36,6 +40,7 @@ construction arguments, are as follows: .. autoclass:: BINARY + :noindex: :members: __init__ @@ -45,10 +50,12 @@ construction arguments, are as follows: .. autoclass:: BLOB :members: __init__ + :noindex: .. autoclass:: BOOLEAN :members: __init__ + :noindex: .. autoclass:: CHAR @@ -57,6 +64,7 @@ construction arguments, are as follows: .. autoclass:: DATE :members: __init__ + :noindex: .. autoclass:: DATETIME @@ -131,6 +139,7 @@ construction arguments, are as follows: .. autoclass:: TEXT :members: __init__ + :noindex: .. autoclass:: TIME @@ -155,6 +164,7 @@ construction arguments, are as follows: .. autoclass:: VARBINARY :members: __init__ + :noindex: .. autoclass:: VARCHAR diff --git a/doc/build/dialects/postgresql.rst b/doc/build/dialects/postgresql.rst index 34cdabc1039..1c4b982e0a7 100644 --- a/doc/build/dialects/postgresql.rst +++ b/doc/build/dialects/postgresql.rst @@ -22,6 +22,10 @@ they originate from :mod:`sqlalchemy.types` or from the local dialect:: Types which are specific to PostgreSQL, or have PostgreSQL-specific construction arguments, are as follows: +.. note: where :noindex: is used, indicates a type that is not redefined + in the dialect module, just imported from sqltypes. this avoids warnings + in the sphinx build + .. currentmodule:: sqlalchemy.dialects.postgresql .. 
autoclass:: aggregate_order_by @@ -80,6 +84,7 @@ construction arguments, are as follows: .. autoclass:: REAL :members: __init__ + :noindex: .. autoclass:: REGCLASS diff --git a/doc/build/glossary.rst b/doc/build/glossary.rst index 2eb7912497f..1e663502414 100644 --- a/doc/build/glossary.rst +++ b/doc/build/glossary.rst @@ -74,6 +74,18 @@ Glossary # Session returns a Result that has ORM entities list_of_users = result.scalars().all() + imperative + declarative + + In the SQLAlchemy ORM, these terms refer to two different styles of + mapping Python classes to database tables. + + .. seealso:: + + :ref:`orm_declarative_mapping` + + :ref:`orm_imperative_mapping` + facade An object that serves as a front-facing interface masking more complex @@ -1508,3 +1520,11 @@ Glossary :ref:`session_object_states` + attached + Indicates an ORM object that is presently associated with a specific + :term:`Session`. + + .. seealso:: + + :ref:`session_object_states` + diff --git a/doc/build/orm/collections.rst b/doc/build/orm/collections.rst index 31db0b26168..b09281501a5 100644 --- a/doc/build/orm/collections.rst +++ b/doc/build/orm/collections.rst @@ -658,8 +658,6 @@ Various internal methods. .. autofunction:: bulk_replace -.. autoclass:: collection - .. autodata:: collection_adapter .. autoclass:: CollectionAdapter diff --git a/doc/build/orm/contextual.rst b/doc/build/orm/contextual.rst index eafdee42766..102ea50d885 100644 --- a/doc/build/orm/contextual.rst +++ b/doc/build/orm/contextual.rst @@ -22,7 +22,7 @@ Architecture `_. .. warning:: The :class:`.scoped_session` registry by default uses a Python - `threading.local() `_ + ``threading.local()`` in order to track :class:`_orm.Session` instances. 
**This is not necessarily compatible with all application servers**, particularly those which make use of greenlets or other alternative forms of concurrency diff --git a/doc/build/orm/internals.rst b/doc/build/orm/internals.rst index 8520fd07c14..54e0dd59cf9 100644 --- a/doc/build/orm/internals.rst +++ b/doc/build/orm/internals.rst @@ -16,7 +16,6 @@ sections, are listed here. .. autoclass:: ClassManager :members: - :inherited-members: .. autoclass:: ColumnProperty :members: diff --git a/doc/build/orm/mapping_styles.rst b/doc/build/orm/mapping_styles.rst index c5791b11cfa..7c7817aec4d 100644 --- a/doc/build/orm/mapping_styles.rst +++ b/doc/build/orm/mapping_styles.rst @@ -194,7 +194,7 @@ Imperative Mapping with Dataclasses and Attrs --------------------------------------------- As described in the section :ref:`orm_declarative_dataclasses`, the -``@dataclass`` decorator and the attrs_ library both work as class +``@dataclass`` decorator and the ``attrs`` library both work as class decorators that are applied to a class first, before it is passed to SQLAlchemy for mapping. Just like we can use the :meth:`_orm.registry.mapped` decorator in order to apply declarative-style diff --git a/doc/build/orm/query.rst b/doc/build/orm/query.rst index d7711671cf1..498679ea9eb 100644 --- a/doc/build/orm/query.rst +++ b/doc/build/orm/query.rst @@ -43,6 +43,7 @@ ORM-Specific Query Constructs .. autoclass:: sqlalchemy.orm.Load :members: + :noindex: .. autofunction:: sqlalchemy.orm.with_loader_criteria diff --git a/doc/build/orm/queryguide.rst b/doc/build/orm/queryguide.rst index f6d6ce711c0..d176087a883 100644 --- a/doc/build/orm/queryguide.rst +++ b/doc/build/orm/queryguide.rst @@ -1088,8 +1088,8 @@ matching objects locally present in the :class:`_orm.Session`. 
See the section Inspecting entities and columns from ORM-enabled SELECT and DML statements ========================================================================== -The :func:`.select` construct, as well as the :func:`.insert`, :func:`.update` -and :func:`.delete` constructs (for the latter DML constructs, as of SQLAlchemy +The :func:`_sql.select` construct, as well as the :func:`_sql.insert`, :func:`_sql.update` +and :func:`_sql.delete` constructs (for the latter DML constructs, as of SQLAlchemy 1.4.33), all support the ability to inspect the entities in which these statements are created against, as well as the columns and datatypes that would be returned in a result set. @@ -1145,7 +1145,7 @@ cases:: this would raise ``NotImplementedError``. -For :func:`.insert`, :func:`.update` and :func:`.delete` constructs, there are +For :func:`_sql.insert`, :func:`.update` and :func:`.delete` constructs, there are two separate attributes. One is :attr:`.UpdateBase.entity_description` which returns information about the primary ORM entity and database table which the DML construct would be affecting:: diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index ad2bdf18775..92d9e263e87 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -283,7 +283,7 @@ def set_search_path(dbapi_connection, connection_record): attribute set up. The PostgreSQL dialect can reflect tables from any schema, as outlined in -:ref:`schema_table_reflection`. +:ref:`metadata_reflection_schemas`. 
With regards to tables which these :class:`_schema.Table` objects refer to via foreign key constraint, a decision must be made as to how diff --git a/lib/sqlalchemy/ext/asyncio/session.py b/lib/sqlalchemy/ext/asyncio/session.py index 53ebbc00978..ce6a0db090c 100644 --- a/lib/sqlalchemy/ext/asyncio/session.py +++ b/lib/sqlalchemy/ext/asyncio/session.py @@ -312,7 +312,9 @@ async def stream( **kw ): """Execute a statement and return a streaming - :class:`_asyncio.AsyncResult` object.""" + :class:`_asyncio.AsyncResult` object. + + """ if execution_options: execution_options = util.immutabledict(execution_options).union( @@ -516,8 +518,8 @@ async def connection(self, **kw): This method may also be used to establish execution options for the database connection used by the current transaction. - .. versionadded:: 1.4.24 Added **kw arguments which are passed through - to the underlying :meth:`_orm.Session.connection` method. + .. versionadded:: 1.4.24 Added \**kw arguments which are passed + through to the underlying :meth:`_orm.Session.connection` method. .. seealso:: diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py index aa904fcf5a8..322f630c7dd 100644 --- a/lib/sqlalchemy/sql/schema.py +++ b/lib/sqlalchemy/sql/schema.py @@ -1264,7 +1264,7 @@ def __init__(self, *args, **kwargs): value automatically for this column, which will be accessible after the statement is invoked via the :attr:`.CursorResult.inserted_primary_key` attribute upon the - :class:`.Result` object. This also applies towards use of the + :class:`_result.Result` object. This also applies towards use of the ORM when ORM-mapped objects are persisted to the database, indicating that a new integer primary key will be available to become part of the :term:`identity key` for that object. 
This @@ -1311,9 +1311,9 @@ def __init__(self, *args, **kwargs): Column('id', ForeignKey('other.id'), primary_key=True, autoincrement='ignore_fk') - It is typically not desirable to have "autoincrement" enabled on a - column that refers to another via foreign key, as such a column is - required to refer to a value that originates from elsewhere. + It is typically not desirable to have "autoincrement" enabled on a + column that refers to another via foreign key, as such a column is + required to refer to a value that originates from elsewhere. The setting has these effects on columns that meet the above criteria: @@ -1351,9 +1351,9 @@ def __init__(self, *args, **kwargs): using a method specific to the database driver in use: * MySQL, SQLite - calling upon ``cursor.lastrowid()`` - (see - `https://www.python.org/dev/peps/pep-0249/#lastrowid - `_) + (see + `https://www.python.org/dev/peps/pep-0249/#lastrowid + `_) * PostgreSQL, SQL Server, Oracle - use RETURNING or an equivalent construct when rendering an INSERT statement, and then retrieving the newly generated primary key values after execution @@ -1378,7 +1378,6 @@ def __init__(self, *args, **kwargs): "fast insertmany" feature. Such features are very new and may not yet be well covered in documentation. 
- :param default: A scalar, Python callable, or :class:`_expression.ColumnElement` expression representing the *default value* for this column, which will be invoked upon insert diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index 740085043c3..353f37b2540 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -901,7 +901,7 @@ def _anonymous_fromclause(self, name=None, flat=False): Below, all column names are given a label so that the two same-named columns ``columna`` are disambiguated as ``table1_columna`` and - ``table2_columna`:: + ``table2_columna``:: >>> from sqlalchemy import table, column, select, true, LABEL_STYLE_TABLENAME_PLUS_COL >>> table1 = table("table1", column("columna"), column("columnb")) diff --git a/lib/sqlalchemy/util/langhelpers.py b/lib/sqlalchemy/util/langhelpers.py index 8d5de183140..c3636f0aba0 100644 --- a/lib/sqlalchemy/util/langhelpers.py +++ b/lib/sqlalchemy/util/langhelpers.py @@ -1167,7 +1167,11 @@ def _set_memoized_attribute(self, key, value): self._memoized_keys |= {key} class memoized_attribute(object): - """A read-only @property that is only evaluated once.""" + """A read-only @property that is only evaluated once. + + :meta private: + + """ def __init__(self, fget, doc=None): self.fget = fget From 5eee848299f6003819e18dd9e9bdc3acc6f34b42 Mon Sep 17 00:00:00 2001 From: Robert Kulagowski Date: Wed, 18 May 2022 15:25:18 -0500 Subject: [PATCH 236/632] Update dbapi_transactions.rst (#8032) If you're defining 'y=row.y' then you might as well use 'y' in the print statement. 
(cherry picked from commit b66d57451ad28572c000490c10e451cbf600a9d1) --- doc/build/tutorial/dbapi_transactions.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/tutorial/dbapi_transactions.rst b/doc/build/tutorial/dbapi_transactions.rst index 0249702ef6b..a9dff8f3851 100644 --- a/doc/build/tutorial/dbapi_transactions.rst +++ b/doc/build/tutorial/dbapi_transactions.rst @@ -270,7 +270,7 @@ Below we illustrate a variety of ways to access rows. y = row.y # illustrate use with Python f-strings - print(f"Row: {row.x} {row.y}") + print(f"Row: {row.x} {y}") .. From c28bccd8964958792e129d82e14630990809689f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 18 May 2022 16:21:49 -0400 Subject: [PATCH 237/632] favor bool_op over op in comparison there's no need to use the is_comparison parameter anymore as bool_op() works better and in 2.0 also does typing correctly. Change-Id: I9e92b665b112d40d90e539003b0efe00ed7b075f (cherry picked from commit deb9bcc0d97dd8b38dfccb340a5fc1f880202ff6) --- doc/build/orm/join_conditions.rst | 24 ++++++------------------ lib/sqlalchemy/sql/operators.py | 18 ++++++++++++------ test/orm/test_relationships.py | 29 ++++++++++++++++++++++++++++- 3 files changed, 46 insertions(+), 25 deletions(-) diff --git a/doc/build/orm/join_conditions.rst b/doc/build/orm/join_conditions.rst index af314f221eb..509ccc98f39 100644 --- a/doc/build/orm/join_conditions.rst +++ b/doc/build/orm/join_conditions.rst @@ -264,21 +264,13 @@ Using custom operators in join conditions Another use case for relationships is the use of custom operators, such as PostgreSQL's "is contained within" ``<<`` operator when joining with types such as :class:`_postgresql.INET` and :class:`_postgresql.CIDR`. 
-For custom operators we use the :meth:`.Operators.op` function:: +For custom boolean operators we use the :meth:`.Operators.bool_op` function:: - inet_column.op("<<")(cidr_column) + inet_column.bool_op("<<")(cidr_column) -However, if we construct a :paramref:`_orm.relationship.primaryjoin` using this -operator, :func:`_orm.relationship` will still need more information. This is because -when it examines our primaryjoin condition, it specifically looks for operators -used for **comparisons**, and this is typically a fixed list containing known -comparison operators such as ``==``, ``<``, etc. So for our custom operator -to participate in this system, we need it to register as a comparison operator -using the :paramref:`~.Operators.op.is_comparison` parameter:: - - inet_column.op("<<", is_comparison=True)(cidr_column) - -A complete example:: +A comparison like the above may be used directly with +:paramref:`_orm.relationship.primaryjoin` when constructing +a :func:`_orm.relationship`:: class IPA(Base): __tablename__ = 'ip_address' @@ -287,7 +279,7 @@ A complete example:: v4address = Column(INET) network = relationship("Network", - primaryjoin="IPA.v4address.op('<<', is_comparison=True)" + primaryjoin="IPA.v4address.bool_op('<<')" "(foreign(Network.v4representation))", viewonly=True ) @@ -306,10 +298,6 @@ Will render as:: SELECT ip_address.id AS ip_address_id, ip_address.v4address AS ip_address_v4address FROM ip_address JOIN network ON ip_address.v4address << network.v4representation -.. versionadded:: 0.9.2 - Added the :paramref:`.Operators.op.is_comparison` - flag to assist in the creation of :func:`_orm.relationship` constructs using - custom operators. - .. 
_relationship_custom_operator_sql_function: Custom operators based on SQL functions diff --git a/lib/sqlalchemy/sql/operators.py b/lib/sqlalchemy/sql/operators.py index 31a2a01a734..4ab0c4f29ea 100644 --- a/lib/sqlalchemy/sql/operators.py +++ b/lib/sqlalchemy/sql/operators.py @@ -153,14 +153,16 @@ def op( A value of 100 will be higher or equal to all operators, and -100 will be lower than or equal to all operators. - :param is_comparison: if True, the operator will be considered as a - "comparison" operator, that is which evaluates to a boolean - true/false value, like ``==``, ``>``, etc. This flag should be set + :param is_comparison: legacy; if True, the operator will be considered + as a "comparison" operator, that is which evaluates to a boolean + true/false value, like ``==``, ``>``, etc. This flag is provided so that ORM relationships can establish that the operator is a comparison operator when used in a custom join condition. - .. versionadded:: 0.9.2 - added the - :paramref:`.Operators.op.is_comparison` flag. + Using the ``is_comparison`` parameter is superseded by using the + :meth:`.Operators.bool_op` method instead; this more succinct + operator sets this parameter automatically. In SQLAlchemy 2.0 it + will also provide for improved typing support. :param return_type: a :class:`.TypeEngine` class or object that will force the return type of an expression produced by this operator @@ -171,6 +173,8 @@ def op( .. seealso:: + :meth:`.Operators.bool_op` + :ref:`types_operators` :ref:`relationship_custom_operator` @@ -189,7 +193,9 @@ def bool_op(self, opstring, precedence=0): This method is shorthand for calling :meth:`.Operators.op` and passing the :paramref:`.Operators.op.is_comparison` - flag with True. + flag with True. A key advantage to using :meth:`.Operators.bool_op` + is that when using column constructs, the "boolean" nature of the + returned expression will be present for :pep:`484` purposes. .. 
seealso:: diff --git a/test/orm/test_relationships.py b/test/orm/test_relationships.py index acb22ce0f8d..1dc5b37fd2d 100644 --- a/test/orm/test_relationships.py +++ b/test/orm/test_relationships.py @@ -2906,7 +2906,7 @@ def define_tables(cls, metadata): Column("foo", String(50)), ) - def test_join_on_custom_op(self): + def test_join_on_custom_op_legacy_is_comparison(self): class A(fixtures.BasicEntity): pass @@ -2933,6 +2933,33 @@ class B(fixtures.BasicEntity): "FROM a JOIN b ON a.foo &* b.foo", ) + def test_join_on_custom_bool_op(self): + class A(fixtures.BasicEntity): + pass + + class B(fixtures.BasicEntity): + pass + + self.mapper_registry.map_imperatively( + A, + self.tables.a, + properties={ + "bs": relationship( + B, + primaryjoin=self.tables.a.c.foo.bool_op("&*")( + foreign(self.tables.b.c.foo) + ), + viewonly=True, + ) + }, + ) + self.mapper_registry.map_imperatively(B, self.tables.b) + self.assert_compile( + fixture_session().query(A).join(A.bs), + "SELECT a.id AS a_id, a.foo AS a_foo " + "FROM a JOIN b ON a.foo &* b.foo", + ) + class ViewOnlyHistoryTest(fixtures.MappedTest): @classmethod From ce4ab47c1633db38dc2c6b2522a9d7d9c09ac598 Mon Sep 17 00:00:00 2001 From: valievkarim Date: Wed, 18 May 2022 16:24:41 -0400 Subject: [PATCH 238/632] Include new MySQL error code 4031 for MySQL disconnect check Added disconnect code for MySQL error 4031, introduced in MySQL >= 8.0.24, indicating connection idle timeout exceeded. In particular this repairs an issue where pre-ping could not reconnect on a timed-out connection. Pull request courtesy valievkarim. 
Fixes: #8036 Closes: #8037 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/8037 Pull-request-sha: 7ab605c2d25c3cd83af41e3250c97c623220cc7a Change-Id: I21249c9d8acb305ac43ce61b90b41daf7fabdfe8 (cherry picked from commit de399c914b923ec3c81d3a51e16c7b720d34e058) --- doc/build/changelog/unreleased_14/8036.rst | 8 ++++++++ lib/sqlalchemy/dialects/mysql/base.py | 1 + test/dialect/mysql/test_dialect.py | 4 ++++ 3 files changed, 13 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/8036.rst diff --git a/doc/build/changelog/unreleased_14/8036.rst b/doc/build/changelog/unreleased_14/8036.rst new file mode 100644 index 00000000000..52b956b6b4a --- /dev/null +++ b/doc/build/changelog/unreleased_14/8036.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, mysql + :tickets: 8036 + + Added disconnect code for MySQL error 4031, introduced in MySQL >= 8.0.24, + indicating connection idle timeout exceeded. In particular this repairs an + issue where pre-ping could not reconnect on a timed-out connection. Pull + request courtesy valievkarim. 
diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index 260c147ddfe..e4d89b2dce2 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -2591,6 +2591,7 @@ def is_disconnect(self, e, connection, cursor): 2014, 2045, 2055, + 4031, ): return True elif isinstance( diff --git a/test/dialect/mysql/test_dialect.py b/test/dialect/mysql/test_dialect.py index 1b34af05390..6f60a215004 100644 --- a/test/dialect/mysql/test_dialect.py +++ b/test/dialect/mysql/test_dialect.py @@ -199,6 +199,10 @@ class DialectTest(fixtures.TestBase): (2006, "foo", "OperationalError", "pymysql", True), (2007, "foo", "OperationalError", "mysqldb", False), (2007, "foo", "OperationalError", "pymysql", False), + (4031, "foo", "OperationalError", "mysqldb", True), + (4031, "foo", "OperationalError", "pymysql", True), + (4032, "foo", "OperationalError", "mysqldb", False), + (4032, "foo", "OperationalError", "pymysql", False), ) def test_is_disconnect( self, arg0, message, exc_cls_name, dialect_name, is_disconnect From 0810b0048f24ad1f7e3a1643626990896a68586a Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Wed, 18 May 2022 22:20:01 +0200 Subject: [PATCH 239/632] Use collation in reflection in MSSQL Explicitly specify the collation when reflecting table columns using MSSQL to prevent "collation conflict" errors. 
Fixes: #8035 Change-Id: I4239a5ca8b041f56d7b3bba67b3357c176db31ee (cherry picked from commit 5e54d5f1ad022781f5d8c6c7da4802613050dde5) --- doc/build/changelog/unreleased_14/8035.rst | 6 +++ lib/sqlalchemy/dialects/mssql/base.py | 6 ++- test/dialect/mssql/test_reflection.py | 48 ++++++++++++++++++++++ 3 files changed, 58 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8035.rst diff --git a/doc/build/changelog/unreleased_14/8035.rst b/doc/build/changelog/unreleased_14/8035.rst new file mode 100644 index 00000000000..ea6ece0556e --- /dev/null +++ b/doc/build/changelog/unreleased_14/8035.rst @@ -0,0 +1,6 @@ +.. change:: + :tags: bug, mssql, reflection + :tickets: 8035 + + Explicitly specify the collation when reflecting table columns using + MSSQL to prevent "collation conflict" errors. diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index 40c06ff0080..1658f27c70c 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -3189,14 +3189,16 @@ def get_columns(self, connection, tablename, dbname, owner, schema, **kw): computed_cols, onclause=sql.and_( computed_cols.c.object_id == func.object_id(full_name), - computed_cols.c.name == columns.c.column_name, + computed_cols.c.name + == columns.c.column_name.collate("DATABASE_DEFAULT"), ), isouter=True, ).join( identity_cols, onclause=sql.and_( identity_cols.c.object_id == func.object_id(full_name), - identity_cols.c.name == columns.c.column_name, + identity_cols.c.name + == columns.c.column_name.collate("DATABASE_DEFAULT"), ), isouter=True, ) diff --git a/test/dialect/mssql/test_reflection.py b/test/dialect/mssql/test_reflection.py index 1fa301e282b..781b4ef188f 100644 --- a/test/dialect/mssql/test_reflection.py +++ b/test/dialect/mssql/test_reflection.py @@ -24,6 +24,7 @@ from sqlalchemy.dialects.mssql import base from sqlalchemy.dialects.mssql.information_schema import CoerceUnicode from 
sqlalchemy.dialects.mssql.information_schema import tables +from sqlalchemy.pool import NullPool from sqlalchemy.schema import CreateIndex from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import ComparesTables @@ -34,6 +35,7 @@ from sqlalchemy.testing import is_ from sqlalchemy.testing import is_true from sqlalchemy.testing import mock +from sqlalchemy.testing import provision class ReflectionTest(fixtures.TestBase, ComparesTables, AssertsCompiledSQL): @@ -358,6 +360,52 @@ def test_has_table_temp_temp_present_both_sessions(self): "drop table #myveryveryuniquetemptablename" ) + @testing.fixture + def temp_db_alt_collation_fixture( + self, connection_no_trans, testing_engine + ): + temp_db_name = "%s_different_collation" % ( + provision.FOLLOWER_IDENT or "default" + ) + cnxn = connection_no_trans.execution_options( + isolation_level="AUTOCOMMIT" + ) + cnxn.exec_driver_sql("DROP DATABASE IF EXISTS %s" % temp_db_name) + cnxn.exec_driver_sql( + "CREATE DATABASE %s COLLATE Danish_Norwegian_CI_AS" % temp_db_name + ) + eng = testing_engine( + url=testing.db.url.set(database=temp_db_name), + options=dict(poolclass=NullPool, future=True), + ) + + yield eng + + cnxn.exec_driver_sql("DROP DATABASE IF EXISTS %s" % temp_db_name) + + def test_global_temp_different_collation( + self, temp_db_alt_collation_fixture + ): + """test #8035""" + + with temp_db_alt_collation_fixture.connect() as conn: + conn.exec_driver_sql("CREATE TABLE ##foo (id int primary key)") + conn.commit() + + eq_( + inspect(conn).get_columns("##foo"), + [ + { + "name": "id", + "type": testing.eq_type_affinity(sqltypes.INTEGER), + "nullable": False, + "default": None, + "autoincrement": False, + } + ], + ) + Table("##foo", MetaData(), autoload_with=conn) + def test_db_qualified_items(self, metadata, connection): Table("foo", metadata, Column("id", Integer, primary_key=True)) Table( From d19d5d72ad1e15d8f7632cb58aaa80da6e85423e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 22 May 
2022 10:19:18 -0400 Subject: [PATCH 240/632] block pg8000 >=1.29 Issue at https://github.com/tlocke/pg8000/issues/111 prevents savepoints from being usable. Change-Id: Ic689cf065c47aea5a146d30c47eb9bbfe8375692 (cherry picked from commit 8ec93170d1434ca7e2a1506a3c962d40b8658183) --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 85c4796aee8..10fab0bbfbc 100644 --- a/setup.cfg +++ b/setup.cfg @@ -63,7 +63,7 @@ oracle = cx_oracle>=7,<8;python_version<"3" cx_oracle>=7;python_version>="3" postgresql = psycopg2>=2.7 -postgresql_pg8000 = pg8000>=1.16.6 +postgresql_pg8000 = pg8000>=1.16.6,<1.29 postgresql_asyncpg = %(asyncio)s asyncpg;python_version>="3" From d29cea6171e2a752175fb76a55afa725f538bf7e Mon Sep 17 00:00:00 2001 From: Andrew Brookins Date: Sun, 22 May 2022 16:24:15 -0400 Subject: [PATCH 241/632] Add a note on using server_onupdate=FetchedValue() Add a note on using `server_onupdate=FetchedValue()` when using SQL expressions with `onupdate`. My team encountered an issue with using a SQL expression with `onupdate`. Despite the dialect (PG) supporting `RETURNING`, we needed to mark the column with `server_onupdate=FetchedValue()` in order to get the column used with `onupdate` to appear in the `RETURNING` clause of `UPDATE` statements. This was not clear from the documentation, so I want to make it crystal clear for other folks defining similar columns. 
Closes: #7437 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/7437 Pull-request-sha: 4845fb09a90ab58f0ae882e0d335ddba09b32ca0 Change-Id: I272278830c8f3f42d057ff77c3209e87df7adf02 (cherry picked from commit 0487cd1678458b786d4beca5ae3a9c8e343c3763) --- doc/build/core/defaults.rst | 2 + doc/build/orm/persistence_techniques.rst | 57 +++++++++++++++++++++++- 2 files changed, 58 insertions(+), 1 deletion(-) diff --git a/doc/build/core/defaults.rst b/doc/build/core/defaults.rst index e2e71ea00fd..6c3d3ed7c7c 100644 --- a/doc/build/core/defaults.rst +++ b/doc/build/core/defaults.rst @@ -175,6 +175,8 @@ and returned alone. by offering the service of organizing multiple VALUES clauses into individual parameter dictionaries. +.. _defaults_client_invoked_sql: + Client-Invoked SQL Expressions ------------------------------ diff --git a/doc/build/orm/persistence_techniques.rst b/doc/build/orm/persistence_techniques.rst index 38f289058b6..9815605b2ce 100644 --- a/doc/build/orm/persistence_techniques.rst +++ b/doc/build/orm/persistence_techniques.rst @@ -280,7 +280,8 @@ Case 1: non primary key, RETURNING or equivalent is supported In this case, columns should be marked as :class:`.FetchedValue` or with an explicit :paramref:`_schema.Column.server_default`. 
The -:paramref:`.orm.mapper.eager_defaults` flag may be used to indicate that these +:paramref:`_orm.mapper.eager_defaults` parameter +may be used to indicate that these columns should be fetched immediately upon INSERT and sometimes UPDATE:: @@ -306,6 +307,7 @@ above table will look like: INSERT INTO my_table DEFAULT VALUES RETURNING my_table.id, my_table.timestamp, my_table.special_identifier + Case 2: non primary key, RETURNING or equivalent is not supported or not needed -------------------------------------------------------------------------------- @@ -497,9 +499,62 @@ The above mapping upon INSERT will look like: :ref:`metadata_defaults_toplevel` +Notes on eagerly fetching client invoked SQL expressions used for INSERT or UPDATE +----------------------------------------------------------------------------------- + +The preceding examples indicate the use of :paramref:`_schema.Column.server_default` +to create tables that include default-generation functions within their +DDL. + +SQLAlchemy also supports non-DDL server side defaults, as documented at +:ref:`defaults_client_invoked_sql`; these "client invoked SQL expressions" +are set up using the :paramref:`_schema.Column.default` and +:paramref:`_schema.Column.onupdate` parameters. + +These SQL expressions currently are subject to the same limitations within the +ORM as occurs for true server-side defaults; they won't be eagerly fetched with +RETURNING when using :paramref:`_orm.mapper.eager_defaults` unless the +:class:`.FetchedValue` directive is associated with the +:class:`_schema.Column`, even though these expressions are not DDL server +defaults and are actively rendered by SQLAlchemy itself. This limitation may be +addressed in future SQLAlchemy releases. 
+ +The :class:`.FetchedValue` construct can be applied to +:paramref:`_schema.Column.server_default` or +:paramref:`_schema.Column.server_onupdate` at the same time that a SQL +expression is used with :paramref:`_schema.Column.default` and +:paramref:`_schema.Column.onupdate`, such as in the example below where the +``func.now()`` construct is used as a client-invoked SQL expression +for :paramref:`_schema.Column.default` and +:paramref:`_schema.Column.onupdate`. In order for the behavior of +:paramref:`_orm.mapper.eager_defaults` to include that it fetches these +values using RETURNING when available, :paramref:`_schema.Column.server_default` and +:paramref:`_schema.Column.server_onupdate` are used with :class:`.FetchedValue` +to ensure that the fetch occurs:: + + class MyModel(Base): + __tablename__ = 'my_table' + + id = Column(Integer, primary_key=True) + + created = Column(DateTime(), default=func.now(), server_default=FetchedValue()) + updated = Column(DateTime(), onupdate=func.now(), server_default=FetchedValue(), server_onupdate=FetchedValue()) + + __mapper_args__ = {"eager_defaults": True} + +With a mapping similar to the above, the SQL rendered by the ORM for +INSERT and UPDATE will include ``created`` and ``updated`` in the RETURNING +clause:: + + INSERT INTO my_table (created) VALUES (now()) RETURNING my_table.id, my_table.created, my_table.updated + + UPDATE my_table SET updated=now() WHERE my_table.id = %(my_table_id)s RETURNING my_table.updated + + .. _orm_dml_returning_objects: + Using INSERT, UPDATE and ON CONFLICT (i.e. 
upsert) to return ORM Objects ========================================================================== From 39f7831b190c15cf288f29f8eabf9aecedb8463b Mon Sep 17 00:00:00 2001 From: Doctor Date: Sun, 22 May 2022 16:05:25 -0400 Subject: [PATCH 242/632] Format code in documentation Closes: #7959 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/7959 Pull-request-sha: fd8f60fcfe9cda0c2ba6dc9ddd171bf85a180295 Change-Id: I9a96c6e3e56cfd550672db4c1da4d68a961f970a (cherry picked from commit eea04615536215a4651ed0c9877cf40d7bc7c12e) --- doc/build/orm/backref.rst | 84 ++-- doc/build/orm/basic_relationships.rst | 228 ++++++----- doc/build/orm/cascades.rst | 64 +-- doc/build/orm/collections.rst | 114 +++--- doc/build/orm/composites.rst | 83 ++-- doc/build/orm/constructors.rst | 3 +- doc/build/orm/declarative_config.rst | 85 ++-- doc/build/orm/declarative_mixins.rst | 180 +++++---- doc/build/orm/declarative_styles.rst | 68 ++-- doc/build/orm/declarative_tables.rst | 103 ++--- doc/build/orm/extensions/associationproxy.rst | 173 +++++---- doc/build/orm/extensions/asyncio.rst | 64 +-- doc/build/orm/extensions/baked.rst | 47 +-- doc/build/orm/extensions/mypy.rst | 96 ++--- doc/build/orm/inheritance.rst | 366 ++++++++++-------- 15 files changed, 1001 insertions(+), 757 deletions(-) diff --git a/doc/build/orm/backref.rst b/doc/build/orm/backref.rst index 65d19eb185c..f52b868f8d9 100644 --- a/doc/build/orm/backref.rst +++ b/doc/build/orm/backref.rst @@ -7,24 +7,25 @@ The :paramref:`_orm.relationship.backref` keyword argument was first introduced mentioned throughout many of the examples here. What does it actually do ? 
Let's start with the canonical ``User`` and ``Address`` scenario:: - from sqlalchemy import Integer, ForeignKey, String, Column - from sqlalchemy.ext.declarative import declarative_base - from sqlalchemy.orm import relationship + from sqlalchemy import Column, ForeignKey, Integer, String + from sqlalchemy.orm import declarative_base, relationship Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) addresses = relationship("Address", backref="user") + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) email = Column(String) - user_id = Column(Integer, ForeignKey('user.id')) + user_id = Column(Integer, ForeignKey("user.id")) The above configuration establishes a collection of ``Address`` objects on ``User`` called ``User.addresses``. It also establishes a ``.user`` attribute on ``Address`` which will @@ -35,24 +36,25 @@ In fact, the :paramref:`_orm.relationship.backref` keyword is only a common shor of an event listener on both sides which will mirror attribute operations in both directions. 
The above configuration is equivalent to:: - from sqlalchemy import Integer, ForeignKey, String, Column - from sqlalchemy.ext.declarative import declarative_base - from sqlalchemy.orm import relationship + from sqlalchemy import Column, ForeignKey, Integer, String + from sqlalchemy.orm import declarative_base, relationship Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) addresses = relationship("Address", back_populates="user") + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) email = Column(String) - user_id = Column(Integer, ForeignKey('user.id')) + user_id = Column(Integer, ForeignKey("user.id")) user = relationship("User", back_populates="addresses") @@ -119,27 +121,31 @@ or a one-to-many or many-to-one which has a :paramref:`_orm.relationship.primary :paramref:`_orm.relationship.primaryjoin` argument is discussed in :ref:`relationship_primaryjoin`). 
Such as if we limited the list of ``Address`` objects to those which start with "tony":: - from sqlalchemy import Integer, ForeignKey, String, Column - from sqlalchemy.ext.declarative import declarative_base - from sqlalchemy.orm import relationship + from sqlalchemy import Column, ForeignKey, Integer, String + from sqlalchemy.orm import declarative_base, relationship Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) - addresses = relationship("Address", - primaryjoin="and_(User.id==Address.user_id, " - "Address.email.startswith('tony'))", - backref="user") + addresses = relationship( + "Address", + primaryjoin=( + "and_(User.id==Address.user_id, Address.email.startswith('tony'))" + ), + backref="user", + ) + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) email = Column(String) - user_id = Column(Integer, ForeignKey('user.id')) + user_id = Column(Integer, ForeignKey("user.id")) We can observe, by inspecting the resulting property, that both sides of the relationship have this join condition applied:: @@ -171,13 +177,16 @@ the :func:`.backref` function in place of a string:: # from sqlalchemy.orm import backref + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) - addresses = relationship("Address", - backref=backref("user", lazy="joined")) + addresses = relationship( + "Address", + backref=backref("user", lazy="joined"), + ) Where above, we placed a ``lazy="joined"`` directive only on the ``Address.user`` side, indicating that when a query against ``Address`` is made, a join to the ``User`` @@ -271,26 +280,31 @@ present, due to the filtering condition. 
But we can do away with this unwanted of the "backref" behavior on the Python side by using two separate :func:`_orm.relationship` constructs, placing :paramref:`_orm.relationship.back_populates` only on one side:: - from sqlalchemy import Integer, ForeignKey, String, Column - from sqlalchemy.ext.declarative import declarative_base - from sqlalchemy.orm import relationship + from sqlalchemy import Column, ForeignKey, Integer, String + from sqlalchemy.orm import declarative_base, relationship Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) - addresses = relationship("Address", - primaryjoin="and_(User.id==Address.user_id, " - "Address.email.startswith('tony'))", - back_populates="user") + + addresses = relationship( + "Address", + primaryjoin="and_(User.id==Address.user_id, " + "Address.email.startswith('tony'))", + back_populates="user", + ) + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) email = Column(String) - user_id = Column(Integer, ForeignKey('user.id')) + user_id = Column(Integer, ForeignKey("user.id")) + user = relationship("User") With the above scenario, appending an ``Address`` object to the ``.addresses`` diff --git a/doc/build/orm/basic_relationships.rst b/doc/build/orm/basic_relationships.rst index 40b3590b6fa..ad57d4ca079 100644 --- a/doc/build/orm/basic_relationships.rst +++ b/doc/build/orm/basic_relationships.rst @@ -7,13 +7,11 @@ A quick walkthrough of the basic relational patterns. The imports used for each of the following sections is as follows:: - from sqlalchemy import Table, Column, Integer, ForeignKey - from sqlalchemy.orm import relationship - from sqlalchemy.ext.declarative import declarative_base + from sqlalchemy import Column, ForeignKey, Integer, Table + from sqlalchemy.orm import declarative_base, relationship Base = declarative_base() - .. 
_relationship_patterns_o2m: One To Many @@ -24,28 +22,30 @@ the parent. :func:`_orm.relationship` is then specified on the parent, as refer a collection of items represented by the child:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) children = relationship("Child") + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('parent.id')) + parent_id = Column(Integer, ForeignKey("parent.id")) To establish a bidirectional relationship in one-to-many, where the "reverse" side is a many to one, specify an additional :func:`_orm.relationship` and connect the two using the :paramref:`_orm.relationship.back_populates` parameter:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) children = relationship("Child", back_populates="parent") + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('parent.id')) + parent_id = Column(Integer, ForeignKey("parent.id")) parent = relationship("Parent", back_populates="children") ``Child`` will get a ``parent`` attribute with many-to-one semantics. @@ -55,7 +55,7 @@ on a single :func:`_orm.relationship` instead of using :paramref:`_orm.relationship.back_populates`:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) children = relationship("Child", backref="parent") @@ -88,13 +88,14 @@ Many to one places a foreign key in the parent table referencing the child. 
attribute will be created:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) - child_id = Column(Integer, ForeignKey('child.id')) + child_id = Column(Integer, ForeignKey("child.id")) child = relationship("Child") + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child" id = Column(Integer, primary_key=True) Bidirectional behavior is achieved by adding a second :func:`_orm.relationship` @@ -102,13 +103,14 @@ and applying the :paramref:`_orm.relationship.back_populates` parameter in both directions:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) - child_id = Column(Integer, ForeignKey('child.id')) + child_id = Column(Integer, ForeignKey("child.id")) child = relationship("Child", back_populates="parents") + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child" id = Column(Integer, primary_key=True) parents = relationship("Parent", back_populates="child") @@ -116,9 +118,9 @@ Alternatively, the :paramref:`_orm.relationship.backref` parameter may be applied to a single :func:`_orm.relationship`, such as ``Parent.child``:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) - child_id = Column(Integer, ForeignKey('child.id')) + child_id = Column(Integer, ForeignKey("child.id")) child = relationship("Child", backref="parents") .. 
_relationships_one_to_one: @@ -143,16 +145,17 @@ a :ref:`many-to-one ` (``Child.parent``) relationships:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) # one-to-many collection children = relationship("Child", back_populates="parent") + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('parent.id')) + parent_id = Column(Integer, ForeignKey("parent.id")) # many-to-one scalar parent = relationship("Parent", back_populates="children") @@ -164,17 +167,18 @@ is converted into a scalar relationship using the ``uselist=False`` flag, renaming ``Parent.children`` to ``Parent.child`` for clarity:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) # previously one-to-many Parent.children is now # one-to-one Parent.child child = relationship("Child", back_populates="parent", uselist=False) + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('parent.id')) + parent_id = Column(Integer, ForeignKey("parent.id")) # many-to-one side remains, see tip below parent = relationship("Parent", back_populates="child") @@ -212,18 +216,18 @@ in this case the ``uselist`` parameter:: from sqlalchemy.orm import backref + class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('parent.id')) + parent_id = Column(Integer, ForeignKey("parent.id")) parent = relationship("Parent", backref=backref("child", uselist=False)) - - .. 
_relationships_many_to_many: Many To Many @@ -236,19 +240,22 @@ table is indicated by the :paramref:`_orm.relationship.secondary` argument to class, so that the :class:`_schema.ForeignKey` directives can locate the remote tables with which to link:: - association_table = Table('association', Base.metadata, - Column('left_id', ForeignKey('left.id')), - Column('right_id', ForeignKey('right.id')) + association_table = Table( + "association", + Base.metadata, + Column("left_id", ForeignKey("left.id")), + Column("right_id", ForeignKey("right.id")), ) + class Parent(Base): - __tablename__ = 'left' + __tablename__ = "left" id = Column(Integer, primary_key=True) - children = relationship("Child", - secondary=association_table) + children = relationship("Child", secondary=association_table) + class Child(Base): - __tablename__ = 'right' + __tablename__ = "right" id = Column(Integer, primary_key=True) .. tip:: @@ -263,55 +270,64 @@ remote tables with which to link:: this ensures that duplicate rows won't be persisted within the table regardless of issues on the application side:: - association_table = Table('association', Base.metadata, - Column('left_id', ForeignKey('left.id'), primary_key=True), - Column('right_id', ForeignKey('right.id'), primary_key=True) + association_table = Table( + "association", + Base.metadata, + Column("left_id", ForeignKey("left.id"), primary_key=True), + Column("right_id", ForeignKey("right.id"), primary_key=True), ) For a bidirectional relationship, both sides of the relationship contain a collection. 
Specify using :paramref:`_orm.relationship.back_populates`, and for each :func:`_orm.relationship` specify the common association table:: - association_table = Table('association', Base.metadata, - Column('left_id', ForeignKey('left.id'), primary_key=True), - Column('right_id', ForeignKey('right.id'), primary_key=True) + association_table = Table( + "association", + Base.metadata, + Column("left_id", ForeignKey("left.id"), primary_key=True), + Column("right_id", ForeignKey("right.id"), primary_key=True), ) + class Parent(Base): - __tablename__ = 'left' + __tablename__ = "left" id = Column(Integer, primary_key=True) children = relationship( - "Child", - secondary=association_table, - back_populates="parents") + "Child", secondary=association_table, back_populates="parents" + ) + class Child(Base): - __tablename__ = 'right' + __tablename__ = "right" id = Column(Integer, primary_key=True) parents = relationship( - "Parent", - secondary=association_table, - back_populates="children") + "Parent", secondary=association_table, back_populates="children" + ) + When using the :paramref:`_orm.relationship.backref` parameter instead of :paramref:`_orm.relationship.back_populates`, the backref will automatically use the same :paramref:`_orm.relationship.secondary` argument for the reverse relationship:: - association_table = Table('association', Base.metadata, - Column('left_id', ForeignKey('left.id'), primary_key=True), - Column('right_id', ForeignKey('right.id'), primary_key=True) + association_table = Table( + "association", + Base.metadata, + Column("left_id", ForeignKey("left.id"), primary_key=True), + Column("right_id", ForeignKey("right.id"), primary_key=True), ) + class Parent(Base): - __tablename__ = 'left' + __tablename__ = "left" id = Column(Integer, primary_key=True) - children = relationship("Child", - secondary=association_table, - backref="parents") + children = relationship( + "Child", secondary=association_table, backref="parents" + ) + class Child(Base): - 
__tablename__ = 'right' + __tablename__ = "right" id = Column(Integer, primary_key=True) The :paramref:`_orm.relationship.secondary` argument of @@ -321,21 +337,21 @@ can define the ``association_table`` at a later point, as long as it's available to the callable after all module initialization is complete:: class Parent(Base): - __tablename__ = 'left' + __tablename__ = "left" id = Column(Integer, primary_key=True) - children = relationship("Child", - secondary=lambda: association_table, - backref="parents") + children = relationship( + "Child", + secondary=lambda: association_table, + backref="parents", + ) With the declarative extension in use, the traditional "string name of the table" is accepted as well, matching the name of the table as stored in ``Base.metadata.tables``:: class Parent(Base): - __tablename__ = 'left' + __tablename__ = "left" id = Column(Integer, primary_key=True) - children = relationship("Child", - secondary="association", - backref="parents") + children = relationship("Child", secondary="association", backref="parents") .. 
warning:: When passed as a Python-evaluable string, the :paramref:`_orm.relationship.secondary` argument is interpreted using Python's @@ -421,39 +437,43 @@ is stored along with each association between ``Parent`` and ``Child``:: class Association(Base): - __tablename__ = 'association' - left_id = Column(ForeignKey('left.id'), primary_key=True) - right_id = Column(ForeignKey('right.id'), primary_key=True) + __tablename__ = "association" + left_id = Column(ForeignKey("left.id"), primary_key=True) + right_id = Column(ForeignKey("right.id"), primary_key=True) extra_data = Column(String(50)) child = relationship("Child") + class Parent(Base): - __tablename__ = 'left' + __tablename__ = "left" id = Column(Integer, primary_key=True) children = relationship("Association") + class Child(Base): - __tablename__ = 'right' + __tablename__ = "right" id = Column(Integer, primary_key=True) As always, the bidirectional version makes use of :paramref:`_orm.relationship.back_populates` or :paramref:`_orm.relationship.backref`:: class Association(Base): - __tablename__ = 'association' - left_id = Column(ForeignKey('left.id'), primary_key=True) - right_id = Column(ForeignKey('right.id'), primary_key=True) + __tablename__ = "association" + left_id = Column(ForeignKey("left.id"), primary_key=True) + right_id = Column(ForeignKey("right.id"), primary_key=True) extra_data = Column(String(50)) child = relationship("Child", back_populates="parents") parent = relationship("Parent", back_populates="children") + class Parent(Base): - __tablename__ = 'left' + __tablename__ = "left" id = Column(Integer, primary_key=True) children = relationship("Association", back_populates="parent") + class Child(Base): - __tablename__ = 'right' + __tablename__ = "right" id = Column(Integer, primary_key=True) parents = relationship("Association", back_populates="child") @@ -494,23 +514,25 @@ associated object, and a second to a target attribute. 
after :meth:`.Session.commit`:: class Association(Base): - __tablename__ = 'association' + __tablename__ = "association" - left_id = Column(ForeignKey('left.id'), primary_key=True) - right_id = Column(ForeignKey('right.id'), primary_key=True) + left_id = Column(ForeignKey("left.id"), primary_key=True) + right_id = Column(ForeignKey("right.id"), primary_key=True) extra_data = Column(String(50)) child = relationship("Child", backref="parent_associations") parent = relationship("Parent", backref="child_associations") + class Parent(Base): - __tablename__ = 'left' + __tablename__ = "left" id = Column(Integer, primary_key=True) children = relationship("Child", secondary="association") + class Child(Base): - __tablename__ = 'right' + __tablename__ = "right" id = Column(Integer, primary_key=True) Additionally, just as changes to one relationship aren't reflected in the @@ -548,6 +570,7 @@ classes using a string name, rather than the class itself:: children = relationship("Child", back_populates="parent") + class Child(Base): # ... 
@@ -584,7 +607,7 @@ package, including expression functions like :func:`_sql.desc` and children = relationship( "Child", order_by="desc(Child.email_address)", - primaryjoin="Parent.id == Child.parent_id" + primaryjoin="Parent.id == Child.parent_id", ) For the case where more than one module contains a class of the same name, @@ -597,7 +620,7 @@ within any of these string expressions:: children = relationship( "myapp.mymodel.Child", order_by="desc(myapp.mymodel.Child.email_address)", - primaryjoin="myapp.mymodel.Parent.id == myapp.mymodel.Child.parent_id" + primaryjoin="myapp.mymodel.Parent.id == myapp.mymodel.Child.parent_id", ) The qualified path can be any partial path that removes ambiguity between @@ -611,7 +634,7 @@ we can specify ``model1.Child`` or ``model2.Child``:: children = relationship( "model1.Child", order_by="desc(mymodel1.Child.email_address)", - primaryjoin="Parent.id == model1.Child.parent_id" + primaryjoin="Parent.id == model1.Child.parent_id", ) The :func:`_orm.relationship` construct also accepts Python functions or @@ -622,9 +645,12 @@ A Python functional approach might look like the following:: from sqlalchemy import desc + def _resolve_child_model(): - from myapplication import Child - return Child + from myapplication import Child + + return Child + class Parent(Base): # ... @@ -632,7 +658,7 @@ A Python functional approach might look like the following:: children = relationship( _resolve_child_model(), order_by=lambda: desc(_resolve_child_model().email_address), - primaryjoin=lambda: Parent.id == _resolve_child_model().parent_id + primaryjoin=lambda: Parent.id == _resolve_child_model().parent_id, ) The full list of parameters which accept Python functions/lambdas or strings @@ -674,23 +700,23 @@ class were available, we could also apply it afterwards:: # first, module A, where Child has not been created yet, # we create a Parent class which knows nothing about Child + class Parent(Base): - # ... + ... + + # ... 
later, in Module B, which is imported after module A: - #... later, in Module B, which is imported after module A: class Child(Base): - # ... + ... + from module_a import Parent # assign the User.addresses relationship as a class variable. The # declarative base class will intercept this and map the relationship. - Parent.children = relationship( - Child, - primaryjoin=Child.parent_id==Parent.id - ) + Parent.children = relationship(Child, primaryjoin=Child.parent_id == Parent.id) .. note:: assignment of mapped properties to a declaratively mapped class will only function correctly if the "declarative base" class is used, which also @@ -718,13 +744,15 @@ declarative base and its :class:`_orm.registry`. We can then refer to this parameter:: keyword_author = Table( - 'keyword_author', Base.metadata, - Column('author_id', Integer, ForeignKey('authors.id')), - Column('keyword_id', Integer, ForeignKey('keywords.id')) - ) + "keyword_author", + Base.metadata, + Column("author_id", Integer, ForeignKey("authors.id")), + Column("keyword_id", Integer, ForeignKey("keywords.id")), + ) + class Author(Base): - __tablename__ = 'authors' + __tablename__ = "authors" id = Column(Integer, primary_key=True) keywords = relationship("Keyword", secondary="keyword_author") diff --git a/doc/build/orm/cascades.rst b/doc/build/orm/cascades.rst index 1a2a7804c21..466c1975cec 100644 --- a/doc/build/orm/cascades.rst +++ b/doc/build/orm/cascades.rst @@ -22,7 +22,7 @@ Cascade behavior is configured using the :func:`~sqlalchemy.orm.relationship`:: class Order(Base): - __tablename__ = 'order' + __tablename__ = "order" items = relationship("Item", cascade="all, delete-orphan") customer = relationship("User", cascade="save-update") @@ -32,11 +32,11 @@ To set cascades on a backref, the same flag can be used with the its arguments back into :func:`~sqlalchemy.orm.relationship`:: class Item(Base): - __tablename__ = 'item' + __tablename__ = "item" - order = relationship("Order", - backref=backref("items", 
cascade="all, delete-orphan") - ) + order = relationship( + "Order", backref=backref("items", cascade="all, delete-orphan") + ) .. sidebar:: The Origins of Cascade @@ -226,23 +226,27 @@ The following example adapts that of :ref:`relationships_many_to_many` to illustrate the ``cascade="all, delete"`` setting on **one** side of the association:: - association_table = Table('association', Base.metadata, - Column('left_id', Integer, ForeignKey('left.id')), - Column('right_id', Integer, ForeignKey('right.id')) + association_table = Table( + "association", + Base.metadata, + Column("left_id", Integer, ForeignKey("left.id")), + Column("right_id", Integer, ForeignKey("right.id")), ) + class Parent(Base): - __tablename__ = 'left' + __tablename__ = "left" id = Column(Integer, primary_key=True) children = relationship( "Child", secondary=association_table, back_populates="parents", - cascade="all, delete" + cascade="all, delete", ) + class Child(Base): - __tablename__ = 'right' + __tablename__ = "right" id = Column(Integer, primary_key=True) parents = relationship( "Parent", @@ -305,18 +309,20 @@ on the relevant ``FOREIGN KEY`` constraint as well:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) children = relationship( - "Child", back_populates="parent", + "Child", + back_populates="parent", cascade="all, delete", - passive_deletes=True + passive_deletes=True, ) + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('parent.id', ondelete="CASCADE")) + parent_id = Column(Integer, ForeignKey("parent.id", ondelete="CASCADE")) parent = relationship("Parent", back_populates="children") The behavior of the above configuration when a parent row is deleted @@ -455,13 +461,16 @@ on the parent->child side of the relationship, and we can then configure ``passive_deletes=True`` on the **other** side of the bidirectional 
relationship as illustrated below:: - association_table = Table('association', Base.metadata, - Column('left_id', Integer, ForeignKey('left.id', ondelete="CASCADE")), - Column('right_id', Integer, ForeignKey('right.id', ondelete="CASCADE")) + association_table = Table( + "association", + Base.metadata, + Column("left_id", Integer, ForeignKey("left.id", ondelete="CASCADE")), + Column("right_id", Integer, ForeignKey("right.id", ondelete="CASCADE")), ) + class Parent(Base): - __tablename__ = 'left' + __tablename__ = "left" id = Column(Integer, primary_key=True) children = relationship( "Child", @@ -470,14 +479,15 @@ relationship as illustrated below:: cascade="all, delete", ) + class Child(Base): - __tablename__ = 'right' + __tablename__ = "right" id = Column(Integer, primary_key=True) parents = relationship( "Parent", secondary=association_table, back_populates="children", - passive_deletes=True + passive_deletes=True, ) Using the above configuration, the deletion of a ``Parent`` object proceeds @@ -682,12 +692,11 @@ parent collection. The ``delete-orphan`` cascade accomplishes this, as illustrated in the example below:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" # ... - addresses = relationship( - "Address", cascade="all, delete-orphan") + addresses = relationship("Address", cascade="all, delete-orphan") # ... @@ -709,9 +718,8 @@ that this related object is not to shared with any other parent simultaneously:: # ... preference = relationship( - "Preference", cascade="all, delete-orphan", - single_parent=True) - + "Preference", cascade="all, delete-orphan", single_parent=True + ) Above, if a hypothetical ``Preference`` object is removed from a ``User``, it will be deleted on flush:: diff --git a/doc/build/orm/collections.rst b/doc/build/orm/collections.rst index b09281501a5..1c40e795589 100644 --- a/doc/build/orm/collections.rst +++ b/doc/build/orm/collections.rst @@ -48,14 +48,15 @@ when accessed. 
Filtering criterion may be applied as well as limits and offsets, either explicitly or via array slices:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" posts = relationship(Post, lazy="dynamic") - jack = session.query(User).get(id) + + jack = session.get(User, id) # filter Jack's blog posts - posts = jack.posts.filter(Post.headline=='this is a post') + posts = jack.posts.filter(Post.headline == "this is a post") # apply array slices posts = jack.posts[5:20] @@ -63,10 +64,10 @@ offsets, either explicitly or via array slices:: The dynamic relationship supports limited write operations, via the :meth:`_orm.AppenderQuery.append` and :meth:`_orm.AppenderQuery.remove` methods:: - oldpost = jack.posts.filter(Post.headline=='old post').one() + oldpost = jack.posts.filter(Post.headline == "old post").one() jack.posts.remove(oldpost) - jack.posts.append(Post('new post')) + jack.posts.append(Post("new post")) Since the read side of the dynamic relationship always queries the database, changes to the underlying collection will not be visible @@ -81,9 +82,7 @@ function in conjunction with ``lazy='dynamic'``:: class Post(Base): __table__ = posts_table - user = relationship(User, - backref=backref('posts', lazy='dynamic') - ) + user = relationship(User, backref=backref("posts", lazy="dynamic")) Note that eager/lazy loading options cannot be used in conjunction dynamic relationships at this time. @@ -111,9 +110,9 @@ A "noload" relationship never loads from the database, even when accessed. 
It is configured using ``lazy='noload'``:: class MyClass(Base): - __tablename__ = 'some_table' + __tablename__ = "some_table" - children = relationship(MyOtherClass, lazy='noload') + children = relationship(MyOtherClass, lazy="noload") Above, the ``children`` collection is fully writeable, and changes to it will be persisted to the database as well as locally available for reading at the @@ -127,9 +126,9 @@ Alternatively, a "raise"-loaded relationship will raise an emit a lazy load:: class MyClass(Base): - __tablename__ = 'some_table' + __tablename__ = "some_table" - children = relationship(MyOtherClass, lazy='raise') + children = relationship(MyOtherClass, lazy="raise") Above, attribute access on the ``children`` collection will raise an exception if it was not previously eagerloaded. This includes read access but for @@ -166,11 +165,12 @@ values accessible through an attribute on the parent instance. By default, this collection is a ``list``:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" parent_id = Column(Integer, primary_key=True) children = relationship(Child) + parent = Parent() parent.children.append(Child()) print(parent.children[0]) @@ -181,12 +181,13 @@ default list, by specifying the :paramref:`_orm.relationship.collection_class` o :func:`~sqlalchemy.orm.relationship`:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" parent_id = Column(Integer, primary_key=True) # use a set children = relationship(Child, collection_class=set) + parent = Parent() child = Child() parent.children.add(child) @@ -203,24 +204,27 @@ to achieve a simple dictionary collection. It produces a dictionary class that of the mapped class as a key. 
Below we map an ``Item`` class containing a dictionary of ``Note`` items keyed to the ``Note.keyword`` attribute:: - from sqlalchemy import Column, Integer, String, ForeignKey - from sqlalchemy.orm import relationship + from sqlalchemy import Column, ForeignKey, Integer, String + from sqlalchemy.orm import declarative_base, relationship from sqlalchemy.orm.collections import attribute_mapped_collection - from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() + class Item(Base): - __tablename__ = 'item' + __tablename__ = "item" id = Column(Integer, primary_key=True) - notes = relationship("Note", - collection_class=attribute_mapped_collection('keyword'), - cascade="all, delete-orphan") + notes = relationship( + "Note", + collection_class=attribute_mapped_collection("keyword"), + cascade="all, delete-orphan", + ) + class Note(Base): - __tablename__ = 'note' + __tablename__ = "note" id = Column(Integer, primary_key=True) - item_id = Column(Integer, ForeignKey('item.id'), nullable=False) + item_id = Column(Integer, ForeignKey("item.id"), nullable=False) keyword = Column(String) text = Column(String) @@ -231,7 +235,7 @@ a dictionary of ``Note`` items keyed to the ``Note.keyword`` attribute:: ``Item.notes`` is then a dictionary:: >>> item = Item() - >>> item.notes['a'] = Note('a', 'atext') + >>> item.notes["a"] = Note("a", "atext") >>> item.notes.items() {'a': <__main__.Note object at 0x2eaaf0>} @@ -242,9 +246,9 @@ key we supply must match that of the actual ``Note`` object:: item = Item() item.notes = { - 'a': Note('a', 'atext'), - 'b': Note('b', 'btext') - } + "a": Note("a", "atext"), + "b": Note("b", "btext"), + } The attribute which :func:`.attribute_mapped_collection` uses as a key does not need to be mapped at all! 
Using a regular Python ``@property`` allows virtually @@ -253,17 +257,20 @@ below when we establish it as a tuple of ``Note.keyword`` and the first ten lett of the ``Note.text`` field:: class Item(Base): - __tablename__ = 'item' + __tablename__ = "item" id = Column(Integer, primary_key=True) - notes = relationship("Note", - collection_class=attribute_mapped_collection('note_key'), - backref="item", - cascade="all, delete-orphan") + notes = relationship( + "Note", + collection_class=attribute_mapped_collection("note_key"), + backref="item", + cascade="all, delete-orphan", + ) + class Note(Base): - __tablename__ = 'note' + __tablename__ = "note" id = Column(Integer, primary_key=True) - item_id = Column(Integer, ForeignKey('item.id'), nullable=False) + item_id = Column(Integer, ForeignKey("item.id"), nullable=False) keyword = Column(String) text = Column(String) @@ -290,12 +297,15 @@ object directly:: from sqlalchemy.orm.collections import column_mapped_collection + class Item(Base): - __tablename__ = 'item' + __tablename__ = "item" id = Column(Integer, primary_key=True) - notes = relationship("Note", - collection_class=column_mapped_collection(Note.__table__.c.keyword), - cascade="all, delete-orphan") + notes = relationship( + "Note", + collection_class=column_mapped_collection(Note.__table__.c.keyword), + cascade="all, delete-orphan", + ) as well as :func:`.mapped_collection` which is passed any callable function. 
Note that it's usually easier to use :func:`.attribute_mapped_collection` along @@ -303,12 +313,15 @@ with a ``@property`` as mentioned earlier:: from sqlalchemy.orm.collections import mapped_collection + class Item(Base): - __tablename__ = 'item' + __tablename__ = "item" id = Column(Integer, primary_key=True) - notes = relationship("Note", - collection_class=mapped_collection(lambda note: note.text[0:10]), - cascade="all, delete-orphan") + notes = relationship( + "Note", + collection_class=mapped_collection(lambda note: note.text[0:10]), + cascade="all, delete-orphan", + ) Dictionary mappings are often combined with the "Association Proxy" extension to produce streamlined dictionary views. See :ref:`proxying_dictionaries` and :ref:`composite_association_proxy` @@ -357,7 +370,7 @@ if the value of ``B.data`` is not set yet, the key will be ``None``:: Setting ``b1.data`` after the fact does not update the collection:: - >>> b1.data = 'the key' + >>> b1.data = "the key" >>> a1.bs {None: } @@ -365,14 +378,14 @@ Setting ``b1.data`` after the fact does not update the collection:: This can also be seen if one attempts to set up ``B()`` in the constructor. The order of arguments changes the result:: - >>> B(a=a1, data='the key') + >>> B(a=a1, data="the key") >>> a1.bs {None: } vs:: - >>> B(data='the key', a=a1) + >>> B(data="the key", a=a1) >>> a1.bs {'the key': } @@ -384,9 +397,9 @@ An event handler such as the following may also be used to track changes in the collection as well:: from sqlalchemy import event - from sqlalchemy.orm import attributes + @event.listens_for(B.data, "set") def set_item(obj, value, previous, initiator): if obj.a is not None: @@ -394,8 +407,6 @@ collection as well:: obj.a.bs[value] = obj obj.a.bs.pop(previous) - - .. autofunction:: attribute_mapped_collection .. 
autofunction:: column_mapped_collection @@ -585,8 +596,8 @@ from within an already instrumented call can cause events to be fired off repeatedly, or inappropriately, leading to internal state corruption in rare cases:: - from sqlalchemy.orm.collections import MappedCollection,\ - collection + from sqlalchemy.orm.collections import MappedCollection, collection + class MyMappedCollection(MappedCollection): """Use @internally_instrumented when your methods @@ -618,7 +629,8 @@ Iteration will go through ``itervalues()`` unless otherwise decorated. of :class:`.MappedCollection` which uses :meth:`.collection.internally_instrumented` can be used:: - from sqlalchemy.orm.collections import _instrument_class, MappedCollection + from sqlalchemy.orm.collections import MappedCollection, _instrument_class + _instrument_class(MappedCollection) This will ensure that the :class:`.MappedCollection` has been properly diff --git a/doc/build/orm/composites.rst b/doc/build/orm/composites.rst index fb3ca476783..69fc93622b2 100644 --- a/doc/build/orm/composites.rst +++ b/doc/build/orm/composites.rst @@ -21,12 +21,14 @@ A simple example represents pairs of columns as a ``Point`` object. return self.x, self.y def __repr__(self): - return "Point(x=%r, y=%r)" % (self.x, self.y) + return f"Point(x={self.x!r}, y={self.y!r})" def __eq__(self, other): - return isinstance(other, Point) and \ - other.x == self.x and \ - other.y == self.y + return ( + isinstance(other, Point) + and other.x == self.x + and other.y == self.y + ) def __ne__(self, other): return not self.__eq__(other) @@ -44,13 +46,13 @@ objects. 
Then, the :func:`.composite` function is used to assign new attributes that will represent sets of columns via the ``Point`` class:: from sqlalchemy import Column, Integer - from sqlalchemy.orm import composite - from sqlalchemy.ext.declarative import declarative_base + from sqlalchemy.orm import composite, declarative_base Base = declarative_base() + class Vertex(Base): - __tablename__ = 'vertices' + __tablename__ = "vertices" id = Column(Integer, primary_key=True) x1 = Column(Integer) @@ -64,10 +66,14 @@ attributes that will represent sets of columns via the ``Point`` class:: A classical mapping above would define each :func:`.composite` against the existing table:: - mapper_registry.map_imperatively(Vertex, vertices_table, properties={ - 'start':composite(Point, vertices_table.c.x1, vertices_table.c.y1), - 'end':composite(Point, vertices_table.c.x2, vertices_table.c.y2), - }) + mapper_registry.map_imperatively( + Vertex, + vertices_table, + properties={ + "start": composite(Point, vertices_table.c.x1, vertices_table.c.y1), + "end": composite(Point, vertices_table.c.x2, vertices_table.c.y2), + }, + ) We can now persist and use ``Vertex`` instances, as well as query for them, using the ``.start`` and ``.end`` attributes against ad-hoc ``Point`` instances: @@ -118,19 +124,27 @@ to define existing or new operations. 
Below we illustrate the "greater than" operator, implementing the same expression that the base "greater than" does:: - from sqlalchemy.orm.properties import CompositeProperty from sqlalchemy import sql + from sqlalchemy.orm.properties import CompositeProperty + class PointComparator(CompositeProperty.Comparator): def __gt__(self, other): """redefine the 'greater than' operation""" - return sql.and_(*[a>b for a, b in - zip(self.__clause_element__().clauses, - other.__composite_values__())]) + return sql.and_( + *[ + a > b + for a, b in zip( + self.__clause_element__().clauses, + other.__composite_values__(), + ) + ] + ) + class Vertex(Base): - ___tablename__ = 'vertices' + ___tablename__ = "vertices" id = Column(Integer, primary_key=True) x1 = Column(Integer) @@ -138,10 +152,8 @@ the same expression that the base "greater than" does:: x2 = Column(Integer) y2 = Column(Integer) - start = composite(Point, x1, y1, - comparator_factory=PointComparator) - end = composite(Point, x2, y2, - comparator_factory=PointComparator) + start = composite(Point, x1, y1, comparator_factory=PointComparator) + end = composite(Point, x2, y2, comparator_factory=PointComparator) Nesting Composites ------------------- @@ -155,7 +167,8 @@ itself be a composite object, which is then mapped to a class ``HasVertex``:: from sqlalchemy.orm import composite - class Point(object): + + class Point: def __init__(self, x, y): self.x = x self.y = y @@ -164,17 +177,20 @@ itself be a composite object, which is then mapped to a class ``HasVertex``:: return self.x, self.y def __repr__(self): - return "Point(x=%r, y=%r)" % (self.x, self.y) + return f"Point(x={self.x!r}, y={self.y!r})" def __eq__(self, other): - return isinstance(other, Point) and \ - other.x == self.x and \ - other.y == self.y + return ( + isinstance(other, Point) + and other.x == self.x + and other.y == self.y + ) def __ne__(self, other): return not self.__eq__(other) - class Vertex(object): + + class Vertex: def __init__(self, start, 
end): self.start = start self.end = end @@ -182,18 +198,17 @@ itself be a composite object, which is then mapped to a class ``HasVertex``:: @classmethod def _generate(self, x1, y1, x2, y2): """generate a Vertex from a row""" - return Vertex( - Point(x1, y1), - Point(x2, y2) - ) + return Vertex(Point(x1, y1), Point(x2, y2)) def __composite_values__(self): - return \ - self.start.__composite_values__() + \ - self.end.__composite_values__() + return ( + self.start.__composite_values__() + + self.end.__composite_values__() + ) + class HasVertex(Base): - __tablename__ = 'has_vertex' + __tablename__ = "has_vertex" id = Column(Integer, primary_key=True) x1 = Column(Integer) y1 = Column(Integer) diff --git a/doc/build/orm/constructors.rst b/doc/build/orm/constructors.rst index b78b0f0cb97..f03ce3a1a38 100644 --- a/doc/build/orm/constructors.rst +++ b/doc/build/orm/constructors.rst @@ -29,7 +29,8 @@ useful for recreating transient properties that are normally assigned in from sqlalchemy import orm - class MyMappedClass(object): + + class MyMappedClass: def __init__(self, data): self.data = data # we need stuff on all instances, but not in the database. diff --git a/doc/build/orm/declarative_config.rst b/doc/build/orm/declarative_config.rst index 9240d9011b2..2386b6bcd1a 100644 --- a/doc/build/orm/declarative_config.rst +++ b/doc/build/orm/declarative_config.rst @@ -42,14 +42,19 @@ objects but also relationships and SQL expressions:: # mapping attributes using declarative with declarative table # i.e. 
__tablename__ - from sqlalchemy import Column, Integer, String, Text, ForeignKey - from sqlalchemy.orm import column_property, relationship, deferred - from sqlalchemy.orm import declarative_base + from sqlalchemy import Column, ForeignKey, Integer, String, Text + from sqlalchemy.orm import ( + column_property, + declarative_base, + deferred, + relationship, + ) Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) @@ -60,8 +65,9 @@ objects but also relationships and SQL expressions:: addresses = relationship("Address", back_populates="user") + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) user_id = Column(ForeignKey("user.id")) @@ -90,13 +96,17 @@ hybrid table style:: # mapping attributes using declarative with imperative table # i.e. __table__ - from sqlalchemy import Table - from sqlalchemy import Column, Integer, String, Text, ForeignKey - from sqlalchemy.orm import column_property, relationship, deferred - from sqlalchemy.orm import declarative_base + from sqlalchemy import Column, ForeignKey, Integer, String, Table, Text + from sqlalchemy.orm import ( + column_property, + declarative_base, + deferred, + relationship, + ) Base = declarative_base() + class User(Base): __table__ = Table( "user", @@ -104,13 +114,16 @@ hybrid table style:: Column("id", Integer, primary_key=True), Column("name", String), Column("firstname", String(50)), - Column("lastname", String(50)) + Column("lastname", String(50)), ) - fullname = column_property(__table__.c.firstname + " " + __table__.c.lastname) + fullname = column_property( + __table__.c.firstname + " " + __table__.c.lastname + ) addresses = relationship("Address", back_populates="user") + class Address(Base): __table__ = Table( "address", @@ -118,7 +131,7 @@ hybrid table style:: Column("id", Integer, primary_key=True), Column("user_id", 
ForeignKey("user.id")), Column("email_address", String), - Column("address_statistics", Text) + Column("address_statistics", Text), ) address_statistics = deferred(__table__.c.address_statistics) @@ -168,15 +181,16 @@ The :paramref:`_orm.mapper.version_id_col` and from datetime import datetime + class Widget(Base): - __tablename__ = 'widgets' + __tablename__ = "widgets" id = Column(Integer, primary_key=True) timestamp = Column(DateTime, nullable=False) __mapper_args__ = { - 'version_id_col': timestamp, - 'version_id_generator': lambda v:datetime.now() + "version_id_col": timestamp, + "version_id_generator": lambda v: datetime.now(), } **Single Table Inheritance** @@ -185,19 +199,20 @@ The :paramref:`_orm.mapper.polymorphic_on` and :paramref:`_orm.mapper.polymorphic_identity` parameters:: class Person(Base): - __tablename__ = 'person' + __tablename__ = "person" person_id = Column(Integer, primary_key=True) type = Column(String, nullable=False) __mapper_args__ = dict( polymorphic_on=type, - polymorphic_identity="person" + polymorphic_identity="person", ) + class Employee(Person): __mapper_args__ = dict( - polymorphic_identity="employee" + polymorphic_identity="employee", ) The ``__mapper_args__`` dictionary may be generated from a class-bound @@ -268,31 +283,35 @@ be illustrated using :meth:`_orm.registry.mapped` as follows:: reg = registry() + class BaseOne: metadata = MetaData() + class BaseTwo: metadata = MetaData() + @reg.mapped class ClassOne: - __tablename__ = 't1' # will use reg.metadata + __tablename__ = "t1" # will use reg.metadata id = Column(Integer, primary_key=True) + @reg.mapped class ClassTwo(BaseOne): - __tablename__ = 't1' # will use BaseOne.metadata + __tablename__ = "t1" # will use BaseOne.metadata id = Column(Integer, primary_key=True) + @reg.mapped class ClassThree(BaseTwo): - __tablename__ = 't1' # will use BaseTwo.metadata + __tablename__ = "t1" # will use BaseTwo.metadata id = Column(Integer, primary_key=True) - .. 
versionchanged:: 1.4.3 The :meth:`_orm.registry.mapped` decorator will honor an attribute named ``.metadata`` on the class as an alternate :class:`_schema.MetaData` collection to be used in place of the @@ -322,24 +341,27 @@ subclasses to extend just from the special class:: __abstract__ = True def some_helpful_method(self): - "" + """""" @declared_attr def __mapper_args__(cls): - return {"helpful mapper arguments":True} + return {"helpful mapper arguments": True} + class MyMappedClass(SomeAbstractBase): - "" + pass One possible use of ``__abstract__`` is to use a distinct :class:`_schema.MetaData` for different bases:: Base = declarative_base() + class DefaultBase(Base): __abstract__ = True metadata = MetaData() + class OtherBase(Base): __abstract__ = True metadata = MetaData() @@ -352,7 +374,6 @@ created perhaps within distinct databases:: DefaultBase.metadata.create_all(some_engine) OtherBase.metadata.create_all(some_other_engine) - ``__table_cls__`` ~~~~~~~~~~~~~~~~~ @@ -363,10 +384,7 @@ to a :class:`_schema.Table` that one generates here:: class MyMixin(object): @classmethod def __table_cls__(cls, name, metadata_obj, *arg, **kw): - return Table( - "my_" + name, - metadata_obj, *arg, **kw - ) + return Table(f"my_{name}", metadata_obj, *arg, **kw) The above mixin would cause all :class:`_schema.Table` objects generated to include the prefix ``"my_"``, followed by the name normally specified using the @@ -386,15 +404,18 @@ such as, define as single-inheritance if there is no primary key present:: @classmethod def __table_cls__(cls, *arg, **kw): for obj in arg[1:]: - if (isinstance(obj, Column) and obj.primary_key) or \ - isinstance(obj, PrimaryKeyConstraint): + if (isinstance(obj, Column) and obj.primary_key) or isinstance( + obj, PrimaryKeyConstraint + ): return Table(*arg, **kw) return None + class Person(AutoTable, Base): id = Column(Integer, primary_key=True) + class Employee(Person): employee_name = Column(String) diff --git 
a/doc/build/orm/declarative_mixins.rst b/doc/build/orm/declarative_mixins.rst index 9bb4c782e4d..a5a85a791ad 100644 --- a/doc/build/orm/declarative_mixins.rst +++ b/doc/build/orm/declarative_mixins.rst @@ -16,20 +16,20 @@ or :func:`_orm.declarative_base` functions. An example of some commonly mixed-in idioms is below:: - from sqlalchemy.orm import declarative_mixin - from sqlalchemy.orm import declared_attr + from sqlalchemy.orm import declarative_mixin, declared_attr + @declarative_mixin class MyMixin: - @declared_attr def __tablename__(cls): return cls.__name__.lower() - __table_args__ = {'mysql_engine': 'InnoDB'} - __mapper_args__= {'always_refresh': True} + __table_args__ = {"mysql_engine": "InnoDB"} + __mapper_args__ = {"always_refresh": True} + + id = Column(Integer, primary_key=True) - id = Column(Integer, primary_key=True) class MyModel(MyMixin, Base): name = Column(String(1000)) @@ -69,21 +69,22 @@ section can also be applied to the base class itself, for patterns that should apply to all classes derived from a particular base. 
This is achieved using the ``cls`` argument of the :func:`_orm.declarative_base` function:: - from sqlalchemy.orm import declared_attr + from sqlalchemy.orm import declarative_base, declared_attr + class Base: @declared_attr def __tablename__(cls): return cls.__name__.lower() - __table_args__ = {'mysql_engine': 'InnoDB'} + __table_args__ = {"mysql_engine": "InnoDB"} - id = Column(Integer, primary_key=True) + id = Column(Integer, primary_key=True) - from sqlalchemy.orm import declarative_base Base = declarative_base(cls=Base) + class MyModel(Base): name = Column(String(1000)) @@ -101,10 +102,11 @@ declaration:: class TimestampMixin: created_at = Column(DateTime, default=func.now()) + class MyModel(TimestampMixin, Base): - __tablename__ = 'test' + __tablename__ = "test" - id = Column(Integer, primary_key=True) + id = Column(Integer, primary_key=True) name = Column(String(1000)) Where above, all declarative classes that include ``TimestampMixin`` @@ -135,6 +137,7 @@ patterns common to many classes can be defined as callables:: from sqlalchemy.orm import declared_attr + @declarative_mixin class ReferenceAddressMixin: @declared_attr @@ -190,16 +193,19 @@ reference a common target class via many-to-one:: def target(cls): return relationship("Target") + class Foo(RefTargetMixin, Base): - __tablename__ = 'foo' + __tablename__ = "foo" id = Column(Integer, primary_key=True) + class Bar(RefTargetMixin, Base): - __tablename__ = 'bar' + __tablename__ = "bar" id = Column(Integer, primary_key=True) + class Target(Base): - __tablename__ = 'target' + __tablename__ = "target" id = Column(Integer, primary_key=True) @@ -220,16 +226,17 @@ Declarative will be using as it calls the methods on its own, thus using The canonical example is the primaryjoin condition that depends upon another mixed-in column:: - @declarative_mixin - class RefTargetMixin: + @declarative_mixin + class RefTargetMixin: @declared_attr def target_id(cls): - return Column('target_id', ForeignKey('target.id')) + 
return Column("target_id", ForeignKey("target.id")) @declared_attr def target(cls): - return relationship(Target, - primaryjoin=Target.id==cls.target_id # this is *incorrect* + return relationship( + Target, + primaryjoin=Target.id == cls.target_id, # this is *incorrect* ) Mapping a class using the above mixin, we will get an error like:: @@ -261,12 +268,12 @@ or alternatively, the string form (which ultimately generates a lambda):: class RefTargetMixin: @declared_attr def target_id(cls): - return Column('target_id', ForeignKey('target.id')) + return Column("target_id", ForeignKey("target.id")) @declared_attr def target(cls): - return relationship("Target", - primaryjoin="Target.id==%s.target_id" % cls.__name__ + return relationship( + Target, primaryjoin=f"Target.id=={cls.__name__}.target_id" ) .. seealso:: @@ -285,11 +292,11 @@ requirement so that no reliance on copying is needed:: @declarative_mixin class SomethingMixin: - @declared_attr def dprop(cls): return deferred(Column(Integer)) + class Something(SomethingMixin, Base): __tablename__ = "something" @@ -300,14 +307,12 @@ the :class:`_orm.declared_attr` is invoked:: @declarative_mixin class SomethingMixin: x = Column(Integer) - y = Column(Integer) @declared_attr def x_plus_y(cls): return column_property(cls.x + cls.y) - .. versionchanged:: 1.0.0 mixin columns are copied to the final mapped class so that :class:`_orm.declared_attr` methods can access the actual column that will be mapped. @@ -324,15 +329,18 @@ target a different type of child object. 
Below is an :func:`.association_proxy` mixin example which provides a scalar list of string values to an implementing class:: - from sqlalchemy import Column, Integer, ForeignKey, String + from sqlalchemy import Column, ForeignKey, Integer, String from sqlalchemy.ext.associationproxy import association_proxy - from sqlalchemy.orm import declarative_base - from sqlalchemy.orm import declarative_mixin - from sqlalchemy.orm import declared_attr - from sqlalchemy.orm import relationship + from sqlalchemy.orm import ( + declarative_base, + declarative_mixin, + declared_attr, + relationship, + ) Base = declarative_base() + @declarative_mixin class HasStringCollection: @declared_attr @@ -341,9 +349,12 @@ string values to an implementing class:: __tablename__ = cls.string_table_name id = Column(Integer, primary_key=True) value = Column(String(50), nullable=False) - parent_id = Column(Integer, - ForeignKey('%s.id' % cls.__tablename__), - nullable=False) + parent_id = Column( + Integer, + ForeignKey(f"{cls.__tablename__}.id"), + nullable=False, + ) + def __init__(self, value): self.value = value @@ -351,16 +362,18 @@ string values to an implementing class:: @declared_attr def strings(cls): - return association_proxy('_strings', 'value') + return association_proxy("_strings", "value") + class TypeA(HasStringCollection, Base): - __tablename__ = 'type_a' - string_table_name = 'type_a_strings' + __tablename__ = "type_a" + string_table_name = "type_a_strings" id = Column(Integer(), primary_key=True) + class TypeB(HasStringCollection, Base): - __tablename__ = 'type_b' - string_table_name = 'type_b_strings' + __tablename__ = "type_b" + string_table_name = "type_b_strings" id = Column(Integer(), primary_key=True) Above, the ``HasStringCollection`` mixin produces a :func:`_orm.relationship` @@ -374,8 +387,8 @@ attribute of each ``StringAttribute`` instance. 
``TypeA`` or ``TypeB`` can be instantiated given the constructor argument ``strings``, a list of strings:: - ta = TypeA(strings=['foo', 'bar']) - tb = TypeB(strings=['bat', 'bar']) + ta = TypeA(strings=["foo", "bar"]) + tb = TypeB(strings=["bat", "bar"]) This list will generate a collection of ``StringAttribute`` objects, which are persisted into a table that's @@ -411,8 +424,8 @@ correct answer for each. For example, to create a mixin that gives every class a simple table name based on class name:: - from sqlalchemy.orm import declarative_mixin - from sqlalchemy.orm import declared_attr + from sqlalchemy.orm import declarative_mixin, declared_attr + @declarative_mixin class Tablename: @@ -420,14 +433,16 @@ name based on class name:: def __tablename__(cls): return cls.__name__.lower() + class Person(Tablename, Base): id = Column(Integer, primary_key=True) - discriminator = Column('type', String(50)) - __mapper_args__ = {'polymorphic_on': discriminator} + discriminator = Column("type", String(50)) + __mapper_args__ = {"polymorphic_on": discriminator} + class Engineer(Person): __tablename__ = None - __mapper_args__ = {'polymorphic_identity': 'engineer'} + __mapper_args__ = {"polymorphic_identity": "engineer"} primary_language = Column(String(50)) Alternatively, we can modify our ``__tablename__`` function to return @@ -435,9 +450,12 @@ Alternatively, we can modify our ``__tablename__`` function to return the effect of those subclasses being mapped with single table inheritance against the parent:: - from sqlalchemy.orm import declarative_mixin - from sqlalchemy.orm import declared_attr - from sqlalchemy.orm import has_inherited_table + from sqlalchemy.orm import ( + declarative_mixin, + declared_attr, + has_inherited_table, + ) + @declarative_mixin class Tablename: @@ -447,14 +465,16 @@ against the parent:: return None return cls.__name__.lower() + class Person(Tablename, Base): id = Column(Integer, primary_key=True) - discriminator = Column('type', String(50)) - 
__mapper_args__ = {'polymorphic_on': discriminator} + discriminator = Column("type", String(50)) + __mapper_args__ = {"polymorphic_on": discriminator} + class Engineer(Person): primary_language = Column(String(50)) - __mapper_args__ = {'polymorphic_identity': 'engineer'} + __mapper_args__ = {"polymorphic_identity": "engineer"} .. _mixin_inheritance_columns: @@ -473,17 +493,19 @@ a primary key:: class HasId: @declared_attr def id(cls): - return Column('id', Integer, primary_key=True) + return Column("id", Integer, primary_key=True) + class Person(HasId, Base): - __tablename__ = 'person' - discriminator = Column('type', String(50)) - __mapper_args__ = {'polymorphic_on': discriminator} + __tablename__ = "person" + discriminator = Column("type", String(50)) + __mapper_args__ = {"polymorphic_on": discriminator} + class Engineer(Person): - __tablename__ = 'engineer' + __tablename__ = "engineer" primary_language = Column(String(50)) - __mapper_args__ = {'polymorphic_identity': 'engineer'} + __mapper_args__ = {"polymorphic_identity": "engineer"} It is usually the case in joined-table inheritance that we want distinctly named columns on each subclass. 
However in this case, we may want to have @@ -498,19 +520,21 @@ function should be invoked **for each class in the hierarchy**, in *almost* @declared_attr.cascading def id(cls): if has_inherited_table(cls): - return Column(ForeignKey('person.id'), primary_key=True) + return Column(ForeignKey("person.id"), primary_key=True) else: return Column(Integer, primary_key=True) + class Person(HasIdMixin, Base): - __tablename__ = 'person' - discriminator = Column('type', String(50)) - __mapper_args__ = {'polymorphic_on': discriminator} + __tablename__ = "person" + discriminator = Column("type", String(50)) + __mapper_args__ = {"polymorphic_on": discriminator} + class Engineer(Person): - __tablename__ = 'engineer' + __tablename__ = "engineer" primary_language = Column(String(50)) - __mapper_args__ = {'polymorphic_identity': 'engineer'} + __mapper_args__ = {"polymorphic_identity": "engineer"} .. warning:: @@ -537,19 +561,21 @@ define on the class itself. The here to create user-defined collation routines that pull from multiple collections:: - from sqlalchemy.orm import declarative_mixin - from sqlalchemy.orm import declared_attr + from sqlalchemy.orm import declarative_mixin, declared_attr + @declarative_mixin class MySQLSettings: - __table_args__ = {'mysql_engine':'InnoDB'} + __table_args__ = {"mysql_engine": "InnoDB"} + @declarative_mixin class MyOtherMixin: - __table_args__ = {'info':'foo'} + __table_args__ = {"info": "foo"} + class MyModel(MySQLSettings, MyOtherMixin, Base): - __tablename__='my_model' + __tablename__ = "my_model" @declared_attr def __table_args__(cls): @@ -558,7 +584,7 @@ from multiple collections:: args.update(MyOtherMixin.__table_args__) return args - id = Column(Integer, primary_key=True) + id = Column(Integer, primary_key=True) Creating Indexes with Mixins ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -569,13 +595,17 @@ establish it as part of ``__table_args__``:: @declarative_mixin class MyMixin: - a = Column(Integer) - b = Column(Integer) + a = Column(Integer) + 
b = Column(Integer) @declared_attr def __table_args__(cls): - return (Index('test_idx_%s' % cls.__tablename__, 'a', 'b'),) + return ( + Index(f"test_idx_{cls.__tablename__}", "a", "b"), + ) + class MyModel(MyMixin, Base): - __tablename__ = 'atable' - c = Column(Integer,primary_key=True) + __tablename__ = "atable" + c = Column(Integer, primary_key=True) + diff --git a/doc/build/orm/declarative_styles.rst b/doc/build/orm/declarative_styles.rst index 284e5a1e61b..24d9eec5ecd 100644 --- a/doc/build/orm/declarative_styles.rst +++ b/doc/build/orm/declarative_styles.rst @@ -38,15 +38,16 @@ method:: With the declarative base class, new mapped classes are declared as subclasses of the base:: - from sqlalchemy import Column, Integer, String, ForeignKey + from sqlalchemy import Column, ForeignKey, Integer, String from sqlalchemy.orm import declarative_base # declarative base class Base = declarative_base() + # an example mapping using the base class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) @@ -120,25 +121,25 @@ a decorator. The :meth:`_orm.registry.mapped` function is a class decorator that can be applied to any Python class with no hierarchy in place. 
The Python class otherwise is configured in declarative style normally:: - from sqlalchemy import Column, Integer, String, Text, ForeignKey - - from sqlalchemy.orm import registry - from sqlalchemy.orm import relationship + from sqlalchemy import Column, ForeignKey, Integer, String, Text + from sqlalchemy.orm import registry, relationship mapper_registry = registry() + @mapper_registry.mapped class User: - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) addresses = relationship("Address", back_populates="user") + @mapper_registry.mapped class Address: - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) user_id = Column(ForeignKey("user.id")) @@ -154,8 +155,10 @@ if the decorator is applied to that class directly. For inheritance mappings, the decorator should be applied to each subclass:: from sqlalchemy.orm import registry + mapper_registry = registry() + @mapper_registry.mapped class Person: __tablename__ = "person" @@ -164,9 +167,8 @@ mappings, the decorator should be applied to each subclass:: type = Column(String, nullable=False) __mapper_args__ = { - "polymorphic_on": type, - "polymorphic_identity": "person" + "polymorphic_identity": "person", } @@ -177,7 +179,7 @@ mappings, the decorator should be applied to each subclass:: person_id = Column(ForeignKey("person.person_id"), primary_key=True) __mapper_args__ = { - "polymorphic_identity": "employee" + "polymorphic_identity": "employee", } Both the "declarative table" and "imperative table" styles of declarative @@ -243,18 +245,11 @@ An example of a mapping using ``@dataclass`` using from __future__ import annotations - from dataclasses import dataclass - from dataclasses import field - from typing import List - from typing import Optional + from dataclasses import dataclass, field + from typing import List, Optional - from sqlalchemy import Column - from sqlalchemy import ForeignKey - from sqlalchemy import 
Integer - from sqlalchemy import String - from sqlalchemy import Table - from sqlalchemy.orm import registry - from sqlalchemy.orm import relationship + from sqlalchemy import Column, ForeignKey, Integer, String, Table + from sqlalchemy.orm import registry, relationship mapper_registry = registry() @@ -276,12 +271,13 @@ An example of a mapping using ``@dataclass`` using nickname: Optional[str] = None addresses: List[Address] = field(default_factory=list) - __mapper_args__ = { # type: ignore - "properties" : { - "addresses": relationship("Address") + __mapper_args__ = { # type: ignore + "properties": { + "addresses": relationship("Address"), } } + @mapper_registry.mapped @dataclass class Address: @@ -328,16 +324,11 @@ association:: from __future__ import annotations - from dataclasses import dataclass - from dataclasses import field + from dataclasses import dataclass, field from typing import List - from sqlalchemy import Column - from sqlalchemy import ForeignKey - from sqlalchemy import Integer - from sqlalchemy import String - from sqlalchemy.orm import registry - from sqlalchemy.orm import relationship + from sqlalchemy import Column, ForeignKey, Integer, String + from sqlalchemy.orm import registry, relationship mapper_registry = registry() @@ -388,7 +379,7 @@ example at :ref:`orm_declarative_mixins_relationships`:: class RefTargetMixin: @declared_attr def target_id(cls): - return Column('target_id', ForeignKey('target.id')) + return Column("target_id", ForeignKey("target.id")) @declared_attr def target(cls): @@ -414,6 +405,7 @@ came from a mixin that is itself a dataclass, the form would be:: default_factory=list, metadata={"sa": lambda: relationship("Address")} ) + @dataclass class AddressMixin: __tablename__ = "address" @@ -428,13 +420,15 @@ came from a mixin that is itself a dataclass, the form would be:: default=None, metadata={"sa": Column(String(50))} ) + @mapper_registry.mapped class User(UserMixin): pass + @mapper_registry.mapped class 
Address(AddressMixin): - pass + pass .. versionadded:: 1.4.2 Added support for "declared attr" style mixin attributes, namely :func:`_orm.relationship` constructs as well as :class:`_schema.Column` @@ -449,10 +443,10 @@ Example Three - attrs with Imperative Table A mapping using ``@attr.s``, in conjunction with imperative table:: import attr + from sqlalchemy.orm import registry # other imports - from sqlalchemy.orm import registry mapper_registry = registry() @@ -474,8 +468,10 @@ A mapping using ``@attr.s``, in conjunction with imperative table:: nickname = attr.ib() addresses = attr.ib() + # other classes... + ``@dataclass`` and attrs_ mappings may also be used with classical mappings, i.e. with the :meth:`_orm.registry.map_imperatively` function. See the section :ref:`orm_imperative_dataclasses` for a similar example. diff --git a/doc/build/orm/declarative_tables.rst b/doc/build/orm/declarative_tables.rst index e935193c7d3..72a48078d24 100644 --- a/doc/build/orm/declarative_tables.rst +++ b/doc/build/orm/declarative_tables.rst @@ -29,13 +29,14 @@ With the declarative base class, the typical form of mapping includes an attribute ``__tablename__`` that indicates the name of a :class:`_schema.Table` that should be generated along with the mapping:: - from sqlalchemy import Column, Integer, String, ForeignKey + from sqlalchemy import Column, ForeignKey, Integer, String from sqlalchemy.orm import declarative_base Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) @@ -114,29 +115,29 @@ The attribute can be specified in one of two forms. 
One is as a dictionary:: class MyClass(Base): - __tablename__ = 'sometable' - __table_args__ = {'mysql_engine':'InnoDB'} + __tablename__ = "sometable" + __table_args__ = {"mysql_engine": "InnoDB"} The other, a tuple, where each argument is positional (usually constraints):: class MyClass(Base): - __tablename__ = 'sometable' + __tablename__ = "sometable" __table_args__ = ( - ForeignKeyConstraint(['id'], ['remote_table.id']), - UniqueConstraint('foo'), - ) + ForeignKeyConstraint(["id"], ["remote_table.id"]), + UniqueConstraint("foo"), + ) Keyword arguments can be specified with the above form by specifying the last argument as a dictionary:: class MyClass(Base): - __tablename__ = 'sometable' + __tablename__ = "sometable" __table_args__ = ( - ForeignKeyConstraint(['id'], ['remote_table.id']), - UniqueConstraint('foo'), - {'autoload':True} - ) + ForeignKeyConstraint(["id"], ["remote_table.id"]), + UniqueConstraint("foo"), + {"autoload": True}, + ) A class may also specify the ``__table_args__`` declarative attribute, as well as the ``__tablename__`` attribute, in a dynamic style using the @@ -156,9 +157,8 @@ dictionary:: class MyClass(Base): - __tablename__ = 'sometable' - __table_args__ = {'schema': 'some_schema'} - + __tablename__ = "sometable" + __table_args__ = {"schema": "some_schema"} The schema name can also be applied to all :class:`_schema.Table` objects globally by using the :paramref:`_schema.MetaData.schema` parameter documented @@ -167,15 +167,15 @@ may be constructed separately and passed either to :func:`_orm.registry` or :func:`_orm.declarative_base`:: from sqlalchemy import MetaData + metadata_obj = MetaData(schema="some_schema") - Base = declarative_base(metadata = metadata_obj) + Base = declarative_base(metadata=metadata_obj) class MyClass(Base): # will use "some_schema" by default - __tablename__ = 'sometable' - + __tablename__ = "sometable" .. 
"polymorphic_identity": "person",
An example below:: - from sqlalchemy import select, func + from sqlalchemy import func, select - subq = select( - func.count(orders.c.id).label('order_count'), - func.max(orders.c.price).label('highest_order'), - orders.c.customer_id - ).group_by(orders.c.customer_id).subquery() + subq = ( + select( + func.count(orders.c.id).label("order_count"), + func.max(orders.c.price).label("highest_order"), + orders.c.customer_id, + ) + .group_by(orders.c.customer_id) + .subquery() + ) + + customer_select = ( + select(customers, subq) + .join_from(customers, subq, customers.c.id == subq.c.customer_id) + .subquery() + ) - customer_select = select(customers, subq).join_from( - customers, subq, customers.c.id == subq.c.customer_id - ).subquery() class Customer(Base): __table__ = customer_select @@ -337,13 +344,16 @@ use a declarative hybrid mapping, passing the :paramref:`_schema.Table.autoload_with` parameter to the :class:`_schema.Table`:: - engine = create_engine("postgresql://user:pass@hostname/my_existing_database") + engine = create_engine( + "postgresql+psycopg2://user:pass@hostname/my_existing_database" + ) + class MyClass(Base): __table__ = Table( - 'mytable', + "mytable", Base.metadata, - autoload_with=engine + autoload_with=engine, ) A major downside of the above approach however is that it requires the database @@ -364,22 +374,25 @@ the reflection process against a target database, and will integrate the results with the declarative table mapping process, that is, classes which use the ``__tablename__`` attribute:: - from sqlalchemy.orm import declarative_base from sqlalchemy.ext.declarative import DeferredReflection + from sqlalchemy.orm import declarative_base Base = declarative_base() + class Reflected(DeferredReflection): __abstract__ = True + class Foo(Reflected, Base): - __tablename__ = 'foo' + __tablename__ = "foo" bars = relationship("Bar") + class Bar(Reflected, Base): - __tablename__ = 'bar' + __tablename__ = "bar" - foo_id = Column(Integer, 
ForeignKey('foo.id')) + foo_id = Column(Integer, ForeignKey("foo.id")) Above, we create a mixin class ``Reflected`` that will serve as a base for classes in our declarative hierarchy that should become mapped when @@ -387,7 +400,9 @@ the ``Reflected.prepare`` method is called. The above mapping is not complete until we do so, given an :class:`_engine.Engine`:: - engine = create_engine("postgresql://user:pass@hostname/my_existing_database") + engine = create_engine( + "postgresql+psycopg2://user:pass@hostname/my_existing_database" + ) Reflected.prepare(engine) The purpose of the ``Reflected`` class is to define the scope at which diff --git a/doc/build/orm/extensions/associationproxy.rst b/doc/build/orm/extensions/associationproxy.rst index 1344bc84a83..8e2b63910e2 100644 --- a/doc/build/orm/extensions/associationproxy.rst +++ b/doc/build/orm/extensions/associationproxy.rst @@ -24,13 +24,13 @@ Consider a many-to-many mapping between two classes, ``User`` and ``Keyword``. Each ``User`` can have any number of ``Keyword`` objects, and vice-versa (the many-to-many pattern is described at :ref:`relationships_many_to_many`):: - from sqlalchemy import Column, Integer, String, ForeignKey, Table + from sqlalchemy import Column, ForeignKey, Integer, String, Table from sqlalchemy.orm import declarative_base, relationship Base = declarative_base() class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String(64)) kw = relationship("Keyword", secondary=lambda: userkeywords_table) @@ -38,27 +38,29 @@ Each ``User`` can have any number of ``Keyword`` objects, and vice-versa def __init__(self, name): self.name = name + class Keyword(Base): - __tablename__ = 'keyword' + __tablename__ = "keyword" id = Column(Integer, primary_key=True) - keyword = Column('keyword', String(64)) + keyword = Column("keyword", String(64)) def __init__(self, keyword): self.keyword = keyword - userkeywords_table = Table('userkeywords', 
Base.metadata, - Column('user_id', Integer, ForeignKey("user.id"), - primary_key=True), - Column('keyword_id', Integer, ForeignKey("keyword.id"), - primary_key=True) + + userkeywords_table = Table( + "userkeywords", + Base.metadata, + Column("user_id", Integer, ForeignKey("user.id"), primary_key=True), + Column("keyword_id", Integer, ForeignKey("keyword.id"), primary_key=True), ) Reading and manipulating the collection of "keyword" strings associated with ``User`` requires traversal from each collection element to the ``.keyword`` attribute, which can be awkward:: - >>> user = User('jek') - >>> user.kw.append(Keyword('cheese-inspector')) + >>> user = User("jek") + >>> user.kw.append(Keyword("cheese-inspector")) >>> print(user.kw) [<__main__.Keyword object at 0x12bf830>] >>> print(user.kw[0].keyword) @@ -72,8 +74,9 @@ value of ``.keyword`` associated with each ``Keyword`` object:: from sqlalchemy.ext.associationproxy import association_proxy + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String(64)) kw = relationship("Keyword", secondary=lambda: userkeywords_table) @@ -82,17 +85,17 @@ value of ``.keyword`` associated with each ``Keyword`` object:: self.name = name # proxy the 'keyword' attribute from the 'kw' relationship - keywords = association_proxy('kw', 'keyword') + keywords = association_proxy("kw", "keyword") We can now reference the ``.keywords`` collection as a listing of strings, which is both readable and writable. 
New ``Keyword`` objects are created for us transparently:: - >>> user = User('jek') - >>> user.keywords.append('cheese-inspector') + >>> user = User("jek") + >>> user.keywords.append("cheese-inspector") >>> user.keywords ['cheese-inspector'] - >>> user.keywords.append('snack ninja') + >>> user.keywords.append("snack ninja") >>> user.kw [<__main__.Keyword object at 0x12cdd30>, <__main__.Keyword object at 0x12cde30>] @@ -121,11 +124,11 @@ assignment event) is intercepted by the association proxy, it instantiates a new instance of the "intermediary" object using its constructor, passing as a single argument the given value. In our example above, an operation like:: - user.keywords.append('cheese-inspector') + user.keywords.append("cheese-inspector") Is translated by the association proxy into the operation:: - user.kw.append(Keyword('cheese-inspector')) + user.kw.append(Keyword("cheese-inspector")) The example works here because we have designed the constructor for ``Keyword`` to accept a single positional argument, ``keyword``. For those cases where a @@ -138,8 +141,9 @@ singular argument. Below we illustrate this using a lambda as is typical:: # ... # use Keyword(keyword=kw) on append() events - keywords = association_proxy('kw', 'keyword', - creator=lambda kw: Keyword(keyword=kw)) + keywords = association_proxy( + "kw", "keyword", creator=lambda kw: Keyword(keyword=kw) + ) The ``creator`` function accepts a single argument in the case of a list- or set- based collection, or a scalar attribute. 
In the case of a dictionary-based @@ -166,35 +170,36 @@ create an association proxy on the ``User`` class called collection of ``User`` to the ``.keyword`` attribute present on each ``UserKeyword``:: - from sqlalchemy import Column, Integer, String, ForeignKey + from sqlalchemy import Column, ForeignKey, Integer, String from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.orm import backref, declarative_base, relationship Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String(64)) # association proxy of "user_keywords" collection # to "keyword" attribute - keywords = association_proxy('user_keywords', 'keyword') + keywords = association_proxy("user_keywords", "keyword") def __init__(self, name): self.name = name + class UserKeyword(Base): - __tablename__ = 'user_keyword' - user_id = Column(Integer, ForeignKey('user.id'), primary_key=True) - keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True) + __tablename__ = "user_keyword" + user_id = Column(Integer, ForeignKey("user.id"), primary_key=True) + keyword_id = Column(Integer, ForeignKey("keyword.id"), primary_key=True) special_key = Column(String(50)) # bidirectional attribute/collection of "user"/"user_keywords" - user = relationship(User, - backref=backref("user_keywords", - cascade="all, delete-orphan") - ) + user = relationship( + User, backref=backref("user_keywords", cascade="all, delete-orphan") + ) # reference to the "Keyword" object keyword = relationship("Keyword") @@ -204,24 +209,25 @@ collection of ``User`` to the ``.keyword`` attribute present on each self.keyword = keyword self.special_key = special_key + class Keyword(Base): - __tablename__ = 'keyword' + __tablename__ = "keyword" id = Column(Integer, primary_key=True) - keyword = Column('keyword', String(64)) + keyword = Column("keyword", String(64)) def __init__(self, keyword): self.keyword = keyword def 
objects that are obtained from the underlying ``UserKeyword`` elements::
We then apply a ``creator`` argument to the
- user = relationship(User, backref=backref( - "user_keywords", - collection_class=attribute_mapped_collection("special_key"), - cascade="all, delete-orphan" - ) - ) + user = relationship( + User, + backref=backref( + "user_keywords", + collection_class=attribute_mapped_collection("special_key"), + cascade="all, delete-orphan", + ), + ) keyword = relationship("Keyword") + class Keyword(Base): - __tablename__ = 'keyword' + __tablename__ = "keyword" id = Column(Integer, primary_key=True) - keyword = Column('keyword', String(64)) + keyword = Column("keyword", String(64)) def __init__(self, keyword): self.keyword = keyword def __repr__(self): - return 'Keyword(%s)' % repr(self.keyword) + return "Keyword(%s)" % repr(self.keyword) We illustrate the ``.keywords`` collection as a dictionary, mapping the ``UserKeyword.special_key`` value to ``Keyword`` objects:: - >>> user = User('log') + >>> user = User("log") - >>> user.keywords['sk1'] = Keyword('kw1') - >>> user.keywords['sk2'] = Keyword('kw2') + >>> user.keywords["sk1"] = Keyword("kw1") + >>> user.keywords["sk2"] = Keyword("kw2") >>> print(user.keywords) {'sk1': Keyword('kw1'), 'sk2': Keyword('kw2')} @@ -360,24 +372,25 @@ and ``Keyword`` classes are entirely concealed. This is achieved by building an association proxy on ``User`` that refers to an association proxy present on ``UserKeyword``:: - from sqlalchemy import Column, Integer, String, ForeignKey + from sqlalchemy import Column, ForeignKey, Integer, String from sqlalchemy.ext.associationproxy import association_proxy from sqlalchemy.orm import backref, declarative_base, relationship from sqlalchemy.orm.collections import attribute_mapped_collection Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String(64)) # the same 'user_keywords'->'keyword' proxy as in # the basic dictionary example. 
keywords = association_proxy( - 'user_keywords', - 'keyword', - creator=lambda k, v: UserKeyword(special_key=k, keyword=v) + "user_keywords", + "keyword", + creator=lambda k, v: UserKeyword(special_key=k, keyword=v), ) # another proxy that is directly column-targeted @@ -386,18 +399,19 @@ present on ``UserKeyword``:: def __init__(self, name): self.name = name + class UserKeyword(Base): - __tablename__ = 'user_keyword' - user_id = Column(ForeignKey('user.id'), primary_key=True) - keyword_id = Column(ForeignKey('keyword.id'), primary_key=True) + __tablename__ = "user_keyword" + user_id = Column(ForeignKey("user.id"), primary_key=True) + keyword_id = Column(ForeignKey("keyword.id"), primary_key=True) special_key = Column(String) user = relationship( User, backref=backref( "user_keywords", collection_class=attribute_mapped_collection("special_key"), - cascade="all, delete-orphan" - ) + cascade="all, delete-orphan", + ), ) # the relationship to Keyword is now called @@ -406,17 +420,17 @@ present on ``UserKeyword``:: # 'keyword' is changed to be a proxy to the # 'keyword' attribute of 'Keyword' - keyword = association_proxy('kw', 'keyword') + keyword = association_proxy("kw", "keyword") + class Keyword(Base): - __tablename__ = 'keyword' + __tablename__ = "keyword" id = Column(Integer, primary_key=True) - keyword = Column('keyword', String(64)) + keyword = Column("keyword", String(64)) def __init__(self, keyword): self.keyword = keyword - ``User.keywords`` is now a dictionary of string to string, where ``UserKeyword`` and ``Keyword`` objects are created and removed for us transparently using the association proxy. 
In the example below, we illustrate @@ -526,23 +540,22 @@ Cascading Scalar Deletes Given a mapping as:: class A(Base): - __tablename__ = 'test_a' + __tablename__ = "test_a" id = Column(Integer, primary_key=True) - ab = relationship( - 'AB', backref='a', uselist=False) + ab = relationship("AB", backref="a", uselist=False) b = association_proxy( - 'ab', 'b', creator=lambda b: AB(b=b), - cascade_scalar_deletes=True) + "ab", "b", creator=lambda b: AB(b=b), cascade_scalar_deletes=True + ) class B(Base): - __tablename__ = 'test_b' + __tablename__ = "test_b" id = Column(Integer, primary_key=True) - ab = relationship('AB', backref='b', cascade='all, delete-orphan') + ab = relationship("AB", backref="b", cascade="all, delete-orphan") class AB(Base): - __tablename__ = 'test_ab' + __tablename__ = "test_ab" a_id = Column(Integer, ForeignKey(A.id), primary_key=True) b_id = Column(Integer, ForeignKey(B.id), primary_key=True) diff --git a/doc/build/orm/extensions/asyncio.rst b/doc/build/orm/extensions/asyncio.rst index 8516ad851ad..8dec8991a88 100644 --- a/doc/build/orm/extensions/asyncio.rst +++ b/doc/build/orm/extensions/asyncio.rst @@ -71,9 +71,11 @@ to deliver a streaming server-side :class:`_asyncio.AsyncResult`:: from sqlalchemy.ext.asyncio import create_async_engine + async def async_main(): engine = create_async_engine( - "postgresql+asyncpg://scott:tiger@localhost/test", echo=True, + "postgresql+asyncpg://scott:tiger@localhost/test", + echo=True, ) async with engine.begin() as conn: @@ -85,7 +87,6 @@ to deliver a streaming server-side :class:`_asyncio.AsyncResult`:: ) async with engine.connect() as conn: - # select a Result, which will be delivered with buffered # results result = await conn.execute(select(t1).where(t1.c.name == "some name 1")) @@ -96,6 +97,7 @@ to deliver a streaming server-side :class:`_asyncio.AsyncResult`:: # clean-up pooled connections await engine.dispose() + asyncio.run(async_main()) Above, the :meth:`_asyncio.AsyncConnection.run_sync` method may 
be used to @@ -123,7 +125,7 @@ cursor and provides an async/await API, such as an async iterator:: async_result = await conn.stream(select(t1)) async for row in async_result: - print("row: %s" % (row, )) + print("row: %s" % (row,)) .. _asyncio_orm: @@ -289,7 +291,6 @@ prevent this: ) async with async_session() as session: - result = await session.execute(select(A).order_by(A.id)) a1 = result.scalars().first() @@ -394,8 +395,9 @@ attribute accesses within a separate function:: import asyncio - from sqlalchemy.ext.asyncio import create_async_engine - from sqlalchemy.ext.asyncio import AsyncSession + from sqlalchemy import select + from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine + def fetch_and_update_objects(session): """run traditional sync-style ORM code in a function that will be @@ -424,7 +426,8 @@ attribute accesses within a separate function:: async def async_main(): engine = create_async_engine( - "postgresql+asyncpg://scott:tiger@localhost/test", echo=True, + "postgresql+asyncpg://scott:tiger@localhost/test", + echo=True, ) async with engine.begin() as conn: await conn.run_sync(Base.metadata.drop_all) @@ -448,6 +451,7 @@ attribute accesses within a separate function:: # clean-up pooled connections await engine.dispose() + asyncio.run(async_main()) The above approach of running certain functions within a "sync" runner @@ -524,18 +528,15 @@ constructs are illustrated below:: import asyncio - from sqlalchemy import text + from sqlalchemy import event, text from sqlalchemy.engine import Engine - from sqlalchemy import event - from sqlalchemy.ext.asyncio import AsyncSession - from sqlalchemy.ext.asyncio import create_async_engine + from sqlalchemy.ext.asyncio import AsyncSession, create_async_engine from sqlalchemy.orm import Session ## Core events ## - engine = create_async_engine( - "postgresql+asyncpg://scott:tiger@localhost:5432/test" - ) + engine = create_async_engine("postgresql+asyncpg://scott:tiger@localhost:5432/test") + # connect 
event on instance of Engine @event.listens_for(engine.sync_engine, "connect") @@ -547,10 +548,15 @@ constructs are illustrated below:: cursor.execute("select 'execute from event'") print(cursor.fetchone()[0]) + # before_execute event on all Engine instances @event.listens_for(Engine, "before_execute") def my_before_execute( - conn, clauseelement, multiparams, params, execution_options + conn, + clauseelement, + multiparams, + params, + execution_options, ): print("before execute!") @@ -559,6 +565,7 @@ constructs are illustrated below:: session = AsyncSession(engine) + # before_commit event on instance of Session @event.listens_for(session.sync_session, "before_commit") def my_before_commit(session): @@ -571,11 +578,13 @@ constructs are illustrated below:: result = connection.execute(text("select 'execute from event'")) print(result.first()) + # after_commit event on all Session instances @event.listens_for(Session, "after_commit") def my_after_commit(session): print("after commit!") + async def go(): await session.execute(text("select 1")) await session.commit() @@ -583,8 +592,10 @@ constructs are illustrated below:: await session.close() await engine.dispose() + asyncio.run(go()) + The above example prints something along the lines of:: New DBAPI connection: > @@ -665,15 +676,18 @@ method. The given function itself does not need to be declared as ``async``; it's perfectly fine for it to be a Python ``lambda:``, as the return awaitable value will be invoked after being returned:: - from sqlalchemy.ext.asyncio import create_async_engine from sqlalchemy import event + from sqlalchemy.ext.asyncio import create_async_engine engine = create_async_engine(...) + @event.listens_for(engine.sync_engine, "connect") def register_custom_types(dbapi_connection, ...): dbapi_connection.run_async( - lambda connection: connection.set_type_codec('MyCustomType', encoder, decoder, ...) + lambda connection: connection.set_type_codec( + "MyCustomType", encoder, decoder, ... 
+ ) ) Above, the object passed to the ``register_custom_types`` event handler @@ -704,12 +718,14 @@ If the same engine must be shared between different loop, it should be configure to disable pooling using :class:`~sqlalchemy.pool.NullPool`, preventing the Engine from using any connection more than once:: + from sqlalchemy.ext.asyncio import create_async_engine from sqlalchemy.pool import NullPool + engine = create_async_engine( - "postgresql+asyncpg://user:pass@host/dbname", poolclass=NullPool + "postgresql+asyncpg://user:pass@host/dbname", + poolclass=NullPool, ) - .. _asyncio_scoped_session: Using asyncio scoped session @@ -765,13 +781,11 @@ leveraging the :meth:`_asyncio.AsyncConnection.run_sync` method of import asyncio - from sqlalchemy.ext.asyncio import create_async_engine - from sqlalchemy.ext.asyncio import AsyncSession from sqlalchemy import inspect + from sqlalchemy.ext.asyncio import create_async_engine + + engine = create_async_engine("postgresql+asyncpg://scott:tiger@localhost/test") - engine = create_async_engine( - "postgresql+asyncpg://scott:tiger@localhost/test" - ) def use_inspector(conn): inspector = inspect(conn) @@ -780,10 +794,12 @@ leveraging the :meth:`_asyncio.AsyncConnection.run_sync` method of # return any value to the caller return inspector.get_table_names() + async def async_main(): async with engine.connect() as conn: tables = await conn.run_sync(use_inspector) + asyncio.run(async_main()) .. 
seealso:: diff --git a/doc/build/orm/extensions/baked.rst b/doc/build/orm/extensions/baked.rst index 4751fef3638..f22e28fa5ac 100644 --- a/doc/build/orm/extensions/baked.rst +++ b/doc/build/orm/extensions/baked.rst @@ -57,15 +57,15 @@ query build-up looks like the following:: from sqlalchemy import bindparam - def search_for_user(session, username, email=None): + def search_for_user(session, username, email=None): baked_query = bakery(lambda session: session.query(User)) - baked_query += lambda q: q.filter(User.name == bindparam('username')) + baked_query += lambda q: q.filter(User.name == bindparam("username")) baked_query += lambda q: q.order_by(User.id) if email: - baked_query += lambda q: q.filter(User.email == bindparam('email')) + baked_query += lambda q: q.filter(User.email == bindparam("email")) result = baked_query(session).params(username=username, email=email).all() @@ -130,7 +130,7 @@ compared to the equivalent "baked" query:: s = Session(bind=engine) for id_ in random.sample(ids, n): q = bakery(lambda s: s.query(Customer)) - q += lambda q: q.filter(Customer.id == bindparam('id')) + q += lambda q: q.filter(Customer.id == bindparam("id")) q(s).params(id=id_).one() The difference in Python function call count for an iteration of 10000 @@ -178,9 +178,10 @@ just building up the query, and removing its :class:`.Session` by calling my_simple_cache = {} + def lookup(session, id_argument): if "my_key" not in my_simple_cache: - query = session.query(Model).filter(Model.id == bindparam('id')) + query = session.query(Model).filter(Model.id == bindparam("id")) my_simple_cache["my_key"] = query.with_session(None) else: query = my_simple_cache["my_key"].with_session(session) @@ -213,9 +214,8 @@ Our example becomes:: my_simple_cache = {} def lookup(session, id_argument): - if "my_key" not in my_simple_cache: - query = session.query(Model).filter(Model.id == bindparam('id')) + query = session.query(Model).filter(Model.id == bindparam("id")) my_simple_cache["my_key"] = 
query.with_session(None).bake() else: query = my_simple_cache["my_key"].with_session(session) @@ -231,9 +231,10 @@ a simple improvement upon the simple "reuse a query" approach:: bakery = baked.bakery() + def lookup(session, id_argument): def create_model_query(session): - return session.query(Model).filter(Model.id == bindparam('id')) + return session.query(Model).filter(Model.id == bindparam("id")) parameterized_query = bakery.bake(create_model_query) return parameterized_query(session).params(id=id_argument).all() @@ -256,6 +257,7 @@ query on a conditional basis:: my_simple_cache = {} + def lookup(session, id_argument, include_frobnizzle=False): if include_frobnizzle: cache_key = "my_key_with_frobnizzle" @@ -263,7 +265,7 @@ query on a conditional basis:: cache_key = "my_key_without_frobnizzle" if cache_key not in my_simple_cache: - query = session.query(Model).filter(Model.id == bindparam('id')) + query = session.query(Model).filter(Model.id == bindparam("id")) if include_frobnizzle: query = query.filter(Model.frobnizzle == True) @@ -284,9 +286,10 @@ into a direct use of "bakery" as follows:: bakery = baked.bakery() + def lookup(session, id_argument, include_frobnizzle=False): def create_model_query(session): - return session.query(Model).filter(Model.id == bindparam('id')) + return session.query(Model).filter(Model.id == bindparam("id")) parameterized_query = bakery.bake(create_model_query) @@ -295,7 +298,8 @@ into a direct use of "bakery" as follows:: return query.filter(Model.frobnizzle == True) parameterized_query = parameterized_query.with_criteria( - include_frobnizzle_in_query) + include_frobnizzle_in_query + ) return parameterized_query(session).params(id=id_argument).all() @@ -315,10 +319,11 @@ means to reduce verbosity:: bakery = baked.bakery() + def lookup(session, id_argument, include_frobnizzle=False): parameterized_query = bakery.bake( - lambda s: s.query(Model).filter(Model.id == bindparam('id')) - ) + lambda s: s.query(Model).filter(Model.id == 
bindparam("id")) + ) if include_frobnizzle: parameterized_query += lambda q: q.filter(Model.frobnizzle == True) @@ -358,10 +363,10 @@ statement compilation time:: baked_query = bakery(lambda session: session.query(User)) baked_query += lambda q: q.filter( - User.name.in_(bindparam('username', expanding=True))) + User.name.in_(bindparam("username", expanding=True)) + ) - result = baked_query.with_session(session).params( - username=['ed', 'fred']).all() + result = baked_query.with_session(session).params(username=["ed", "fred"]).all() .. seealso:: @@ -388,8 +393,7 @@ of the baked query:: # select a correlated subquery in the top columns list, # we have the "session" argument, pass that - my_q = bakery( - lambda s: s.query(Address.id, my_subq.to_query(s).as_scalar())) + my_q = bakery(lambda s: s.query(Address.id, my_subq.to_query(s).as_scalar())) # use a correlated subquery in some of the criteria, we have # the "query" argument, pass that. @@ -413,12 +417,11 @@ alter the query differently each time. To allow a still to allow the result to be cached, the event can be registered passing the ``bake_ok=True`` flag:: - @event.listens_for( - Query, "before_compile", retval=True, bake_ok=True) + @event.listens_for(Query, "before_compile", retval=True, bake_ok=True) def my_event(query): for desc in query.column_descriptions: - if desc['type'] is User: - entity = desc['entity'] + if desc["type"] is User: + entity = desc["entity"] query = query.filter(entity.deleted == False) return query diff --git a/doc/build/orm/extensions/mypy.rst b/doc/build/orm/extensions/mypy.rst index aa0a7f32d8a..368c151009b 100644 --- a/doc/build/orm/extensions/mypy.rst +++ b/doc/build/orm/extensions/mypy.rst @@ -92,32 +92,33 @@ alter classes dynamically at runtime. 
To cover the major areas where this occurs, consider the following ORM mapping, using the typical example of the ``User`` class:: - from sqlalchemy import Column - from sqlalchemy import Integer - from sqlalchemy import String - from sqlalchemy import select + from sqlalchemy import Column, Integer, String, select from sqlalchemy.orm import declarative_base # "Base" is a class that is created dynamically from the # declarative_base() function Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) + # "some_user" is an instance of the User class, which # accepts "id" and "name" kwargs based on the mapping - some_user = User(id=5, name='user') + some_user = User(id=5, name="user") # it has an attribute called .name that's a string print(f"Username: {some_user.name}") # a select() construct makes use of SQL expressions derived from the # User class itself - select_stmt = select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains('s')) + select_stmt = ( + select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s")) + ) Above, the steps that the Mypy extension can take include: @@ -143,35 +144,37 @@ When the Mypy plugin processes the above file, the resulting static class definition and Python code passed to the Mypy tool is equivalent to the following:: - from sqlalchemy import Column - from sqlalchemy import Integer - from sqlalchemy import String - from sqlalchemy import select - from sqlalchemy.orm import declarative_base - from sqlalchemy.orm.decl_api import DeclarativeMeta + from sqlalchemy import Column, Integer, String, select from sqlalchemy.orm import Mapped + from sqlalchemy.orm.decl_api import DeclarativeMeta + class Base(metaclass=DeclarativeMeta): __abstract__ = True + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id: Mapped[Optional[int]] = Mapped._special_method( Column(Integer, primary_key=True) ) - name: 
Mapped[Optional[str]] = Mapped._special_method( - Column(String) - ) + name: Mapped[Optional[str]] = Mapped._special_method(Column(String)) - def __init__(self, id: Optional[int] = ..., name: Optional[str] = ...) -> None: + def __init__( + self, id: Optional[int] = ..., name: Optional[str] = ... + ) -> None: ... - some_user = User(id=5, name='user') + + some_user = User(id=5, name="user") print(f"Username: {some_user.name}") - select_stmt = select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains('s')) + select_stmt = ( + select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s")) + ) + The key steps which have been taken above include: @@ -251,6 +254,7 @@ and convert them to include the ``Mapped[]`` type surrounding them. The from sqlalchemy.orm import Mapped + class MyClass(Base): # ... @@ -307,14 +311,16 @@ needs an explicit type to be sent:: Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) user_id = Column(ForeignKey("user.id")) @@ -331,7 +337,7 @@ To resolve, apply an explicit type annotation to the ``Address.user_id`` column:: class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) user_id: int = Column(ForeignKey("user.id")) @@ -352,7 +358,7 @@ the attributes can be explicitly stated with a complete annotation that Base.metadata, Column(Integer, primary_key=True), Column("employee_name", String(50), nullable=False), - Column(String(50)) + Column(String(50)), ) id: Mapped[int] @@ -379,13 +385,14 @@ present, as well as if the target type of the :func:`_orm.relationship` is a string or callable, and not a class:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) + class 
Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) user_id: int = Column(ForeignKey("user.id")) @@ -404,7 +411,7 @@ The error can be resolved either by using ``relationship(User, uselist=False)`` or by providing the type, in this case the scalar ``User`` object:: class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) user_id: int = Column(ForeignKey("user.id")) @@ -419,7 +426,8 @@ by pep-484, ensuring the class is imported with in the `TYPE_CHECKING block `_ as appropriate:: - from typing import List, TYPE_CHECKING + from typing import TYPE_CHECKING, List + from .mymodel import Base if TYPE_CHECKING: @@ -427,8 +435,9 @@ as appropriate:: # that cannot normally be imported at runtime from .myaddressmodel import Address + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) @@ -438,15 +447,18 @@ As is the case with columns, the :class:`_orm.Mapped` class may also be applied explicitly:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) - addresses: Mapped[List["Address"]] = relationship("Address", back_populates="user") + addresses: Mapped[List["Address"]] = relationship( + "Address", back_populates="user" + ) + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) user_id: int = Column(ForeignKey("user.id")) @@ -469,8 +481,8 @@ such as :meth:`_orm.registry.mapped`) should be decorated with the :func:`_orm.declarative_mixin` decorator, which provides a hint to the Mypy plugin that a particular class intends to serve as a declarative mixin:: - from sqlalchemy.orm import declared_attr - from sqlalchemy.orm import declarative_mixin + from sqlalchemy.orm import declarative_mixin, declared_attr + @declarative_mixin class HasUpdatedAt: @@ 
-478,9 +490,9 @@ plugin that a particular class intends to serve as a declarative mixin:: def updated_at(cls) -> Column[DateTime]: # uses Column return Column(DateTime) + @declarative_mixin class HasCompany: - @declared_attr def company_id(cls) -> Mapped[int]: # uses Mapped return Column(ForeignKey("company.id")) @@ -489,8 +501,9 @@ plugin that a particular class intends to serve as a declarative mixin:: def company(cls) -> Mapped["Company"]: return relationship("Company") + class Employee(HasUpdatedAt, HasCompany, Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String) @@ -505,7 +518,6 @@ this complexity:: company_id: Mapped[int] company: Mapped["Company"] - Combining with Dataclasses or Other Type-Sensitive Attribute Systems ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -515,7 +527,7 @@ use to build the class, and the value given in each assignment statement is significant. That is, a class as follows has to be stated exactly as it is in order to be accepted by dataclasses:: - mapper_registry : registry = registry() + mapper_registry: registry = registry() @mapper_registry.mapped @@ -536,9 +548,7 @@ as it is in order to be accepted by dataclasses:: addresses: List[Address] = field(default_factory=list) __mapper_args__ = { # type: ignore - "properties" : { - "addresses": relationship("Address") - } + "properties": {"addresses": relationship("Address")} } We can't apply our ``Mapped[]`` types to the attributes ``id``, ``name``, @@ -578,9 +588,7 @@ This attribute can be conditional within the ``TYPE_CHECKING`` variable:: _mypy_mapped_attrs = [id, name, "fullname", "nickname", addresses] __mapper_args__ = { # type: ignore - "properties" : { - "addresses": relationship("Address") - } + "properties": {"addresses": relationship("Address")} } With the above recipe, the attributes listed in ``_mypy_mapped_attrs`` diff --git a/doc/build/orm/inheritance.rst 
b/doc/build/orm/inheritance.rst index eafbba342ac..d179864ebc5 100644 --- a/doc/build/orm/inheritance.rst +++ b/doc/build/orm/inheritance.rst @@ -45,14 +45,14 @@ additional arguments that will refer to the polymorphic discriminator column as well as the identifier for the base class:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(50)) __mapper_args__ = { - 'polymorphic_identity':'employee', - 'polymorphic_on':type + "polymorphic_identity": "employee", + "polymorphic_on": type, } Above, an additional column ``type`` is established to act as the @@ -82,21 +82,22 @@ they represent. Each table also must contain a primary key column (or columns), as well as a foreign key reference to the parent table:: class Engineer(Employee): - __tablename__ = 'engineer' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "engineer" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) engineer_name = Column(String(30)) __mapper_args__ = { - 'polymorphic_identity':'engineer', + "polymorphic_identity": "engineer", } + class Manager(Employee): - __tablename__ = 'manager' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "manager" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) manager_name = Column(String(30)) __mapper_args__ = { - 'polymorphic_identity':'manager', + "polymorphic_identity": "manager", } In the above example, each mapping specifies the @@ -159,29 +160,32 @@ the ``company`` table, the relationships are set up between ``Company`` and ``Employee``:: class Company(Base): - __tablename__ = 'company' + __tablename__ = "company" id = Column(Integer, primary_key=True) name = Column(String(50)) employees = relationship("Employee", back_populates="company") + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, 
primary_key=True) name = Column(String(50)) type = Column(String(50)) - company_id = Column(ForeignKey('company.id')) + company_id = Column(ForeignKey("company.id")) company = relationship("Company", back_populates="employees") __mapper_args__ = { - 'polymorphic_identity':'employee', - 'polymorphic_on':type + "polymorphic_identity": "employee", + "polymorphic_on": type, } + class Manager(Employee): - # ... + ... + class Engineer(Employee): - # ... + ... If the foreign key constraint is on a table corresponding to a subclass, the relationship should target that subclass instead. In the example @@ -190,36 +194,39 @@ key constraint from ``manager`` to ``company``, so the relationships are established between the ``Manager`` and ``Company`` classes:: class Company(Base): - __tablename__ = 'company' + __tablename__ = "company" id = Column(Integer, primary_key=True) name = Column(String(50)) managers = relationship("Manager", back_populates="company") + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(50)) __mapper_args__ = { - 'polymorphic_identity':'employee', - 'polymorphic_on':type + "polymorphic_identity": "employee", + "polymorphic_on": type, } + class Manager(Employee): - __tablename__ = 'manager' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "manager" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) manager_name = Column(String(30)) - company_id = Column(ForeignKey('company.id')) + company_id = Column(ForeignKey("company.id")) company = relationship("Company", back_populates="managers") __mapper_args__ = { - 'polymorphic_identity':'manager', + "polymorphic_identity": "manager", } + class Engineer(Employee): - # ... + ... 
Above, the ``Manager`` class will have a ``Manager.company`` attribute; ``Company`` will have a ``Company.managers`` attribute that always @@ -263,28 +270,30 @@ subclasses, indicating that the column is to be mapped only to that subclass; the :class:`_schema.Column` will be applied to the same base :class:`_schema.Table` object:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(20)) __mapper_args__ = { - 'polymorphic_on':type, - 'polymorphic_identity':'employee' + "polymorphic_on": type, + "polymorphic_identity": "employee", } + class Manager(Employee): manager_data = Column(String(50)) __mapper_args__ = { - 'polymorphic_identity':'manager' + "polymorphic_identity": "manager", } + class Engineer(Employee): engineer_info = Column(String(50)) __mapper_args__ = { - 'polymorphic_identity':'engineer' + "polymorphic_identity": "engineer", } Note that the mappers for the derived classes Manager and Engineer omit the @@ -302,22 +311,28 @@ declaration on a subclass that has no table of its own. 
A tricky case comes up when two subclasses want to specify *the same* column, as below:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(20)) __mapper_args__ = { - 'polymorphic_on':type, - 'polymorphic_identity':'employee' + "polymorphic_on": type, + "polymorphic_identity": "employee", } + class Engineer(Employee): - __mapper_args__ = {'polymorphic_identity': 'engineer'} + __mapper_args__ = { + "polymorphic_identity": "engineer", + } start_date = Column(DateTime) + class Manager(Employee): - __mapper_args__ = {'polymorphic_identity': 'manager'} + __mapper_args__ = { + "polymorphic_identity": "manager", + } start_date = Column(DateTime) Above, the ``start_date`` column declared on both ``Engineer`` and ``Manager`` @@ -335,32 +350,39 @@ if it already exists:: from sqlalchemy.orm import declared_attr + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(20)) __mapper_args__ = { - 'polymorphic_on':type, - 'polymorphic_identity':'employee' + "polymorphic_on": type, + "polymorphic_identity": "employee", } + class Engineer(Employee): - __mapper_args__ = {'polymorphic_identity': 'engineer'} + __mapper_args__ = { + "polymorphic_identity": "engineer", + } @declared_attr def start_date(cls): "Start date column, if not present already." - return Employee.__table__.c.get('start_date', Column(DateTime)) + return Employee.__table__.c.get("start_date", Column(DateTime)) + class Manager(Employee): - __mapper_args__ = {'polymorphic_identity': 'manager'} + __mapper_args__ = { + "polymorphic_identity": "manager", + } @declared_attr def start_date(cls): "Start date column, if not present already." 
- return Employee.__table__.c.get('start_date', Column(DateTime)) + return Employee.__table__.c.get("start_date", Column(DateTime)) Above, when ``Manager`` is mapped, the ``start_date`` column is already present on the ``Employee`` class; by returning the existing @@ -372,26 +394,33 @@ to define a particular series of columns and/or other mapped attributes from a reusable mixin class:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(20)) __mapper_args__ = { - 'polymorphic_on':type, - 'polymorphic_identity':'employee' + "polymorphic_on": type, + "polymorphic_identity": "employee", } + class HasStartDate: @declared_attr def start_date(cls): - return cls.__table__.c.get('start_date', Column(DateTime)) + return cls.__table__.c.get("start_date", Column(DateTime)) + class Engineer(HasStartDate, Employee): - __mapper_args__ = {'polymorphic_identity': 'engineer'} + __mapper_args__ = { + "polymorphic_identity": "engineer", + } + class Manager(HasStartDate, Employee): - __mapper_args__ = {'polymorphic_identity': 'manager'} + __mapper_args__ = { + "polymorphic_identity": "manager", + } Relationships with Single Table Inheritance +++++++++++++++++++++++++++++++++++++++++++ @@ -402,22 +431,23 @@ attribute should be on the same class that's the "foreign" side of the relationship:: class Company(Base): - __tablename__ = 'company' + __tablename__ = "company" id = Column(Integer, primary_key=True) name = Column(String(50)) employees = relationship("Employee", back_populates="company") + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(50)) - company_id = Column(ForeignKey('company.id')) + company_id = Column(ForeignKey("company.id")) company = relationship("Company", back_populates="employees") __mapper_args__ = { - 'polymorphic_identity':'employee', 
- 'polymorphic_on':type + "polymorphic_identity": "employee", + "polymorphic_on": type, } @@ -425,14 +455,15 @@ relationship:: manager_data = Column(String(50)) __mapper_args__ = { - 'polymorphic_identity':'manager' + "polymorphic_identity": "manager", } + class Engineer(Employee): engineer_info = Column(String(50)) __mapper_args__ = { - 'polymorphic_identity':'engineer' + "polymorphic_identity": "engineer", } Also, like the case of joined inheritance, we can create relationships @@ -441,31 +472,32 @@ include a WHERE clause that limits the class selection to that subclass or subclasses:: class Company(Base): - __tablename__ = 'company' + __tablename__ = "company" id = Column(Integer, primary_key=True) name = Column(String(50)) managers = relationship("Manager", back_populates="company") + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(50)) __mapper_args__ = { - 'polymorphic_identity':'employee', - 'polymorphic_on':type + "polymorphic_identity": "employee", + "polymorphic_on": type, } class Manager(Employee): manager_name = Column(String(30)) - company_id = Column(ForeignKey('company.id')) + company_id = Column(ForeignKey("company.id")) company = relationship("Company", back_populates="managers") __mapper_args__ = { - 'polymorphic_identity':'manager', + "polymorphic_identity": "manager", } @@ -473,7 +505,7 @@ or subclasses:: engineer_info = Column(String(50)) __mapper_args__ = { - 'polymorphic_identity':'engineer' + "polymorphic_identity": "engineer", } Above, the ``Manager`` class will have a ``Manager.company`` attribute; @@ -533,31 +565,33 @@ This indicates to Declarative as well as the mapping that the superclass table should not be considered as part of the mapping:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) + class Manager(Employee): - 
__tablename__ = 'manager' + __tablename__ = "manager" id = Column(Integer, primary_key=True) name = Column(String(50)) manager_data = Column(String(50)) __mapper_args__ = { - 'concrete': True + "concrete": True, } + class Engineer(Employee): - __tablename__ = 'engineer' + __tablename__ = "engineer" id = Column(Integer, primary_key=True) name = Column(String(50)) engineer_info = Column(String(50)) __mapper_args__ = { - 'concrete': True + "concrete": True, } Two critical points should be noted: @@ -604,36 +638,39 @@ almost the same way as we do other forms of inheritance mappings:: from sqlalchemy.ext.declarative import ConcreteBase + class Employee(ConcreteBase, Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) __mapper_args__ = { - 'polymorphic_identity': 'employee', - 'concrete': True + "polymorphic_identity": "employee", + "concrete": True, } + class Manager(Employee): - __tablename__ = 'manager' + __tablename__ = "manager" id = Column(Integer, primary_key=True) name = Column(String(50)) manager_data = Column(String(40)) __mapper_args__ = { - 'polymorphic_identity': 'manager', - 'concrete': True + "polymorphic_identity": "manager", + "concrete": True, } + class Engineer(Employee): - __tablename__ = 'engineer' + __tablename__ = "engineer" id = Column(Integer, primary_key=True) name = Column(String(50)) engineer_info = Column(String(40)) __mapper_args__ = { - 'polymorphic_identity': 'engineer', - 'concrete': True + "polymorphic_identity": "engineer", + "concrete": True, } Above, Declarative sets up the polymorphic selectable for the @@ -703,24 +740,26 @@ base class with the ``__abstract__`` indicator:: class Employee(Base): __abstract__ = True + class Manager(Employee): - __tablename__ = 'manager' + __tablename__ = "manager" id = Column(Integer, primary_key=True) name = Column(String(50)) manager_data = Column(String(40)) __mapper_args__ = { - 'polymorphic_identity': 'manager', + 
"polymorphic_identity": "manager", } + class Engineer(Employee): - __tablename__ = 'engineer' + __tablename__ = "engineer" id = Column(Integer, primary_key=True) name = Column(String(50)) engineer_info = Column(String(40)) __mapper_args__ = { - 'polymorphic_identity': 'engineer', + "polymorphic_identity": "engineer", } Above, we are not actually making use of SQLAlchemy's inheritance mapping @@ -751,29 +790,32 @@ class called :class:`.AbstractConcreteBase` which achieves this automatically:: from sqlalchemy.ext.declarative import AbstractConcreteBase + class Employee(AbstractConcreteBase, Base): pass + class Manager(Employee): - __tablename__ = 'manager' + __tablename__ = "manager" id = Column(Integer, primary_key=True) name = Column(String(50)) manager_data = Column(String(40)) __mapper_args__ = { - 'polymorphic_identity': 'manager', - 'concrete': True + "polymorphic_identity": "manager", + "concrete": True, } + class Engineer(Employee): - __tablename__ = 'engineer' + __tablename__ = "engineer" id = Column(Integer, primary_key=True) name = Column(String(50)) engineer_info = Column(String(40)) __mapper_args__ = { - 'polymorphic_identity': 'engineer', - 'concrete': True + "polymorphic_identity": "engineer", + "concrete": True, } The :class:`.AbstractConcreteBase` helper class has a more complex internal @@ -801,34 +843,41 @@ establishes the :class:`_schema.Table` objects separately:: metadata_obj = Base.metadata employees_table = Table( - 'employee', metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String(50)), + "employee", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("name", String(50)), ) managers_table = Table( - 'manager', metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String(50)), - Column('manager_data', String(50)), + "manager", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("manager_data", String(50)), ) engineers_table = Table( - 
'engineer', metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String(50)), - Column('engineer_info', String(50)), + "engineer", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("engineer_info", String(50)), ) Next, the UNION is produced using :func:`.polymorphic_union`:: from sqlalchemy.orm import polymorphic_union - pjoin = polymorphic_union({ - 'employee': employees_table, - 'manager': managers_table, - 'engineer': engineers_table - }, 'type', 'pjoin') + pjoin = polymorphic_union( + { + "employee": employees_table, + "manager": managers_table, + "engineer": engineers_table, + }, + "type", + "pjoin", + ) With the above :class:`_schema.Table` objects, the mappings can be produced using "semi-classical" style, where we use Declarative in conjunction with the ``__table__`` argument; @@ -838,22 +887,26 @@ the :paramref:`.mapper.with_polymorphic` parameter:: class Employee(Base): __table__ = employee_table __mapper_args__ = { - 'polymorphic_on': pjoin.c.type, - 'with_polymorphic': ('*', pjoin), - 'polymorphic_identity': 'employee' + "polymorphic_on": pjoin.c.type, + "with_polymorphic": ("*", pjoin), + "polymorphic_identity": "employee", } + class Engineer(Employee): __table__ = engineer_table __mapper_args__ = { - 'polymorphic_identity': 'engineer', - 'concrete': True} + "polymorphic_identity": "engineer", + "concrete": True, + } + class Manager(Employee): __table__ = manager_table __mapper_args__ = { - 'polymorphic_identity': 'manager', - 'concrete': True} + "polymorphic_identity": "manager", + "concrete": True, + } Alternatively, the same :class:`_schema.Table` objects can be used in fully "classical" style, without using Declarative at all. 
@@ -864,16 +917,19 @@ A constructor similar to that supplied by Declarative is illustrated:: for k in kw: setattr(self, k, kw[k]) + class Manager(Employee): pass + class Engineer(Employee): pass + employee_mapper = mapper_registry.map_imperatively( Employee, pjoin, - with_polymorphic=('*', pjoin), + with_polymorphic=("*", pjoin), polymorphic_on=pjoin.c.type, ) manager_mapper = mapper_registry.map_imperatively( @@ -881,18 +937,16 @@ A constructor similar to that supplied by Declarative is illustrated:: managers_table, inherits=employee_mapper, concrete=True, - polymorphic_identity='manager', + polymorphic_identity="manager", ) engineer_mapper = mapper_registry.map_imperatively( Engineer, engineers_table, inherits=employee_mapper, concrete=True, - polymorphic_identity='engineer', + polymorphic_identity="engineer", ) - - The "abstract" example can also be mapped using "semi-classical" or "classical" style. The difference is that instead of applying the "polymorphic union" to the :paramref:`.mapper.with_polymorphic` parameter, we apply it directly @@ -901,30 +955,40 @@ mapping is illustrated below:: from sqlalchemy.orm import polymorphic_union - pjoin = polymorphic_union({ - 'manager': managers_table, - 'engineer': engineers_table - }, 'type', 'pjoin') + pjoin = polymorphic_union( + { + "manager": managers_table, + "engineer": engineers_table, + }, + "type", + "pjoin", + ) + class Employee(Base): __table__ = pjoin __mapper_args__ = { - 'polymorphic_on': pjoin.c.type, - 'with_polymorphic': '*', - 'polymorphic_identity': 'employee' + "polymorphic_on": pjoin.c.type, + "with_polymorphic": "*", + "polymorphic_identity": "employee", } + class Engineer(Employee): __table__ = engineer_table __mapper_args__ = { - 'polymorphic_identity': 'engineer', - 'concrete': True} + "polymorphic_identity": "engineer", + "concrete": True, + } + class Manager(Employee): __table__ = manager_table __mapper_args__ = { - 'polymorphic_identity': 'manager', - 'concrete': True} + 
"polymorphic_identity": "manager", + "concrete": True, + } + Above, we use :func:`.polymorphic_union` in the same manner as before, except that we omit the ``employee`` table. @@ -955,47 +1019,47 @@ such a configuration is as follows:: class Company(Base): - __tablename__ = 'company' + __tablename__ = "company" id = Column(Integer, primary_key=True) name = Column(String(50)) employees = relationship("Employee") class Employee(ConcreteBase, Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) - company_id = Column(ForeignKey('company.id')) + company_id = Column(ForeignKey("company.id")) __mapper_args__ = { - 'polymorphic_identity': 'employee', - 'concrete': True + "polymorphic_identity": "employee", + "concrete": True, } class Manager(Employee): - __tablename__ = 'manager' + __tablename__ = "manager" id = Column(Integer, primary_key=True) name = Column(String(50)) manager_data = Column(String(40)) - company_id = Column(ForeignKey('company.id')) + company_id = Column(ForeignKey("company.id")) __mapper_args__ = { - 'polymorphic_identity': 'manager', - 'concrete': True + "polymorphic_identity": "manager", + "concrete": True, } class Engineer(Employee): - __tablename__ = 'engineer' + __tablename__ = "engineer" id = Column(Integer, primary_key=True) name = Column(String(50)) engineer_info = Column(String(40)) - company_id = Column(ForeignKey('company.id')) + company_id = Column(ForeignKey("company.id")) __mapper_args__ = { - 'polymorphic_identity': 'engineer', - 'concrete': True + "polymorphic_identity": "engineer", + "concrete": True, } The next complexity with concrete inheritance and relationships involves @@ -1015,50 +1079,50 @@ each of the relationships:: class Company(Base): - __tablename__ = 'company' + __tablename__ = "company" id = Column(Integer, primary_key=True) name = Column(String(50)) employees = relationship("Employee", back_populates="company") class Employee(ConcreteBase, 
Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) - company_id = Column(ForeignKey('company.id')) + company_id = Column(ForeignKey("company.id")) company = relationship("Company", back_populates="employees") __mapper_args__ = { - 'polymorphic_identity': 'employee', - 'concrete': True + "polymorphic_identity": "employee", + "concrete": True, } class Manager(Employee): - __tablename__ = 'manager' + __tablename__ = "manager" id = Column(Integer, primary_key=True) name = Column(String(50)) manager_data = Column(String(40)) - company_id = Column(ForeignKey('company.id')) + company_id = Column(ForeignKey("company.id")) company = relationship("Company", back_populates="employees") __mapper_args__ = { - 'polymorphic_identity': 'manager', - 'concrete': True + "polymorphic_identity": "manager", + "concrete": True, } class Engineer(Employee): - __tablename__ = 'engineer' + __tablename__ = "engineer" id = Column(Integer, primary_key=True) name = Column(String(50)) engineer_info = Column(String(40)) - company_id = Column(ForeignKey('company.id')) + company_id = Column(ForeignKey("company.id")) company = relationship("Company", back_populates="employees") __mapper_args__ = { - 'polymorphic_identity': 'engineer', - 'concrete': True + "polymorphic_identity": "engineer", + "concrete": True, } The above limitation is related to the current implementation, including From 7256643fd8bab26d389c872102b76a6fdef2731f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 23 May 2022 09:48:05 -0400 Subject: [PATCH 243/632] remove insanely old note from 12 years ago Change-Id: Id0929b6bc062fc4766c9c69427524e3cd2da1030 (cherry picked from commit 853f726454cff2f34c010cbafacfec85f51f8eeb) --- doc/build/orm/collections.rst | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/doc/build/orm/collections.rst b/doc/build/orm/collections.rst index 1c40e795589..800d2613bdc 100644 --- 
a/doc/build/orm/collections.rst +++ b/doc/build/orm/collections.rst @@ -622,21 +622,6 @@ must decorate appender and remover methods, however- there are no compatible methods in the basic dictionary interface for SQLAlchemy to use by default. Iteration will go through ``itervalues()`` unless otherwise decorated. -.. note:: - - Due to a bug in MappedCollection prior to version 0.7.6, this - workaround usually needs to be called before a custom subclass - of :class:`.MappedCollection` which uses :meth:`.collection.internally_instrumented` - can be used:: - - from sqlalchemy.orm.collections import MappedCollection, _instrument_class - - _instrument_class(MappedCollection) - - This will ensure that the :class:`.MappedCollection` has been properly - initialized with custom ``__setitem__()`` and ``__delitem__()`` - methods before used in a custom subclass. - .. autoclass:: sqlalchemy.orm.collections.MappedCollection :members: From e1a2247e71eb6298952ba9c0b65d4a796ad72dc4 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 24 May 2022 12:27:59 -0400 Subject: [PATCH 244/632] use random table name this test is failing on CI with "##foo does not exist", so hypothesize there's some kind of race condition with global temp table names. 
Change-Id: I8c6c26a7fda70f67735ce20af67373c311e48731 (cherry picked from commit d7b131d2dfc4c519b23d9ed29364036ef88b1863) --- test/dialect/mssql/test_reflection.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/test/dialect/mssql/test_reflection.py b/test/dialect/mssql/test_reflection.py index 781b4ef188f..4c5a5398164 100644 --- a/test/dialect/mssql/test_reflection.py +++ b/test/dialect/mssql/test_reflection.py @@ -1,6 +1,7 @@ # -*- encoding: utf-8 import datetime import decimal +import random from sqlalchemy import Column from sqlalchemy import DDL @@ -388,12 +389,16 @@ def test_global_temp_different_collation( ): """test #8035""" + tname = "##foo%s" % (random.randint(1, 1000000),) + with temp_db_alt_collation_fixture.connect() as conn: - conn.exec_driver_sql("CREATE TABLE ##foo (id int primary key)") + conn.exec_driver_sql( + "CREATE TABLE %s (id int primary key)" % (tname,) + ) conn.commit() eq_( - inspect(conn).get_columns("##foo"), + inspect(conn).get_columns(tname), [ { "name": "id", @@ -404,7 +409,7 @@ def test_global_temp_different_collation( } ], ) - Table("##foo", MetaData(), autoload_with=conn) + Table(tname, MetaData(), autoload_with=conn) def test_db_qualified_items(self, metadata, connection): Table("foo", metadata, Column("id", Integer, primary_key=True)) From 25044513aa5100f2329fd2006bb97cd2136ff709 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 23 May 2022 10:34:32 -0400 Subject: [PATCH 245/632] enable pg8000 for 1.29.1 and above ROLLBACK TO SAVEPOINT is re-enabled in https://github.com/tlocke/pg8000/issues/111. we still have to add savepoint support to our fixture that deletes from tables without checking for them. this is inconvenient but not incorrect. 
Change-Id: I2f4a0a3e18db93c3e6794ade9b0fee33d2e4b7dc (cherry picked from commit c0612f8166b7cd07895e7302bb59192abbb68c43) --- lib/sqlalchemy/testing/fixtures.py | 10 +++++++++- setup.cfg | 2 +- test/dialect/postgresql/test_query.py | 26 +++++++++++++++++++------- test/engine/test_transaction.py | 10 ++++++++-- 4 files changed, 37 insertions(+), 11 deletions(-) diff --git a/lib/sqlalchemy/testing/fixtures.py b/lib/sqlalchemy/testing/fixtures.py index f5bdd44922e..8c2e9d8de6c 100644 --- a/lib/sqlalchemy/testing/fixtures.py +++ b/lib/sqlalchemy/testing/fixtures.py @@ -439,6 +439,10 @@ def _teardown_each_tables(self): elif self.run_create_tables == "each": drop_all_tables_from_metadata(self._tables_metadata, self.bind) + savepoints = getattr(config.requirements, "savepoints", False) + if savepoints: + savepoints = savepoints.enabled + # no need to run deletes if tables are recreated on setup if ( self.run_define_tables != "each" @@ -456,7 +460,11 @@ def _teardown_each_tables(self): ] ): try: - conn.execute(table.delete()) + if savepoints: + with conn.begin_nested(): + conn.execute(table.delete()) + else: + conn.execute(table.delete()) except sa.exc.DBAPIError as ex: util.print_( ("Error emptying table %s: %r" % (table, ex)), diff --git a/setup.cfg b/setup.cfg index 10fab0bbfbc..e49a1c9c20e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -63,7 +63,7 @@ oracle = cx_oracle>=7,<8;python_version<"3" cx_oracle>=7;python_version>="3" postgresql = psycopg2>=2.7 -postgresql_pg8000 = pg8000>=1.16.6,<1.29 +postgresql_pg8000 = pg8000>=1.16.6,!=1.29.0 postgresql_asyncpg = %(asyncio)s asyncpg;python_version>="3" diff --git a/test/dialect/postgresql/test_query.py b/test/dialect/postgresql/test_query.py index a1e9c465729..d0f5d429b41 100644 --- a/test/dialect/postgresql/test_query.py +++ b/test/dialect/postgresql/test_query.py @@ -75,7 +75,8 @@ def test_foreignkey_missing_insert(self, implicit_returning): # the case here due to the foreign key. 
with expect_warnings(".*has no Python-side or server-side default.*"): - with engine.begin() as conn: + with engine.connect() as conn: + conn.begin() assert_raises( (exc.IntegrityError, exc.ProgrammingError), conn.execute, @@ -596,7 +597,8 @@ def _assert_data_noautoincrement(self, table): with engine.begin() as conn: conn.execute(table.insert(), {"id": 30, "data": "d1"}) - with engine.begin() as conn: + with engine.connect() as conn: + trans = conn.begin() with expect_warnings( ".*has no Python-side or server-side default.*" ): @@ -606,8 +608,10 @@ def _assert_data_noautoincrement(self, table): table.insert(), {"data": "d2"}, ) + trans.rollback() - with engine.begin() as conn: + with engine.connect() as conn: + trans = conn.begin() with expect_warnings( ".*has no Python-side or server-side default.*" ): @@ -617,8 +621,10 @@ def _assert_data_noautoincrement(self, table): table.insert(), [{"data": "d2"}, {"data": "d3"}], ) + trans.rollback() - with engine.begin() as conn: + with engine.connect() as conn: + trans = conn.begin() with expect_warnings( ".*has no Python-side or server-side default.*" ): @@ -628,8 +634,10 @@ def _assert_data_noautoincrement(self, table): table.insert(), {"data": "d2"}, ) + trans.rollback() - with engine.begin() as conn: + with engine.connect() as conn: + trans = conn.begin() with expect_warnings( ".*has no Python-side or server-side default.*" ): @@ -639,6 +647,7 @@ def _assert_data_noautoincrement(self, table): table.insert(), [{"data": "d2"}, {"data": "d3"}], ) + trans.rollback() with engine.begin() as conn: conn.execute( @@ -660,7 +669,8 @@ def _assert_data_noautoincrement(self, table): with engine.begin() as conn: conn.execute(table.insert(), {"id": 30, "data": "d1"}) - with engine.begin() as conn: + with engine.connect() as conn: + trans = conn.begin() with expect_warnings( ".*has no Python-side or server-side default.*" ): @@ -671,7 +681,8 @@ def _assert_data_noautoincrement(self, table): {"data": "d2"}, ) - with engine.begin() as 
conn: + with engine.connect() as conn: + trans = conn.begin() with expect_warnings( ".*has no Python-side or server-side default.*" ): @@ -681,6 +692,7 @@ def _assert_data_noautoincrement(self, table): table.insert(), [{"data": "d2"}, {"data": "d3"}], ) + trans.rollback() with engine.begin() as conn: conn.execute( diff --git a/test/engine/test_transaction.py b/test/engine/test_transaction.py index 43b42647eb4..85e39c49815 100644 --- a/test/engine/test_transaction.py +++ b/test/engine/test_transaction.py @@ -346,7 +346,10 @@ def test_savepoint_rollback_fails_flat(self, local_connection): with testing.expect_warnings("nested transaction already"): s1.rollback() # no error (though it warns) - t1.commit() # no error + # this test was previously calling "commit", but note relies on + # buggy behavior in PostgreSQL as the transaction block is in fact + # aborted. pg8000 enforces this on the client as of 1.29 + t1.rollback() # no error @testing.requires.savepoints_w_release def test_savepoint_release_fails_flat(self): @@ -368,7 +371,10 @@ def test_savepoint_release_fails_flat(self): assert not s1.is_active s1.rollback() # no error. prior to 1.4 this would try to rollback - t1.commit() # no error + # this test was previously calling "commit", but note relies on + # buggy behavior in PostgreSQL as the transaction block is in fact + # aborted. pg8000 enforces this on the client as of 1.29 + t1.rollback() # no error @testing.requires.savepoints_w_release def test_savepoint_release_fails_ctxmanager(self, local_connection): From 853d6be759ad79d0d3e1d6a52fc7c9c32c0146ec Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 25 May 2022 08:47:29 -0400 Subject: [PATCH 246/632] apply bindparam escape name to processors dictionary Fixed SQL compiler issue where the "bind processing" function for a bound parameter would not be correctly applied to a bound value if the bound parameter's name were "escaped". 
Concretely, this applies, among other cases, to Oracle when a :class:`.Column` has a name that itself requires quoting, such that the quoting-required name is then used for the bound parameters generated within DML statements, and the datatype in use requires bind processing, such as the :class:`.Enum` datatype. Fixes: #8053 Change-Id: I39d060a87e240b4ebcfccaa9c535e971b7255d99 (cherry picked from commit 4d58ca05e83048e999059a8c2c2e67cb77abf976) --- doc/build/changelog/unreleased_14/8053.rst | 11 ++++++ lib/sqlalchemy/sql/compiler.py | 10 +++++- test/dialect/oracle/test_dialect.py | 19 ++++++++++ test/sql/test_compiler.py | 42 ++++++++++++++++++++++ 4 files changed, 81 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/8053.rst diff --git a/doc/build/changelog/unreleased_14/8053.rst b/doc/build/changelog/unreleased_14/8053.rst new file mode 100644 index 00000000000..316b6385941 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8053.rst @@ -0,0 +1,11 @@ +.. change:: + :tags: bug, oracle + :tickets: 8053 + + Fixed SQL compiler issue where the "bind processing" function for a bound + parameter would not be correctly applied to a bound value if the bound + parameter's name were "escaped". Concretely, this applies, among other + cases, to Oracle when a :class:`.Column` has a name that itself requires + quoting, such that the quoting-required name is then used for the bound + parameters generated within DML statements, and the datatype in use + requires bind processing, such as the :class:`.Enum` datatype. 
diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index bc2d657fb51..fa158863da9 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -898,8 +898,16 @@ def _apply_numbered_params(self): @util.memoized_property def _bind_processors(self): + _escaped_bind_names = self.escaped_bind_names + has_escaped_names = bool(_escaped_bind_names) + return dict( - (key, value) + ( + _escaped_bind_names.get(key, key) + if has_escaped_names + else key, + value, + ) for key, value in ( ( self.bind_names[bindparam], diff --git a/test/dialect/oracle/test_dialect.py b/test/dialect/oracle/test_dialect.py index d65a6d2b53a..f494b59aeff 100644 --- a/test/dialect/oracle/test_dialect.py +++ b/test/dialect/oracle/test_dialect.py @@ -5,6 +5,7 @@ from sqlalchemy import bindparam from sqlalchemy import Computed from sqlalchemy import create_engine +from sqlalchemy import Enum from sqlalchemy import exc from sqlalchemy import Float from sqlalchemy import func @@ -32,6 +33,7 @@ from sqlalchemy.testing import mock from sqlalchemy.testing.mock import Mock from sqlalchemy.testing.schema import Column +from sqlalchemy.testing.schema import pep435_enum from sqlalchemy.testing.schema import Table from sqlalchemy.testing.suite import test_select from sqlalchemy.util import u @@ -564,6 +566,23 @@ def test_numeric_bind_round_trip(self, connection): 4, ) + def test_param_w_processors(self, metadata, connection): + """test #8053""" + + SomeEnum = pep435_enum("SomeEnum") + one = SomeEnum("one", 1) + SomeEnum("two", 2) + + t = Table( + "t", + metadata, + Column("_id", Integer, primary_key=True), + Column("_data", Enum(SomeEnum)), + ) + t.create(connection) + connection.execute(t.insert(), {"_id": 1, "_data": one}) + eq_(connection.scalar(select(t.c._data)), one) + def test_numeric_bind_in_crud(self, metadata, connection): t = Table("asfd", metadata, Column("100K", Integer)) t.create(connection) diff --git a/test/sql/test_compiler.py 
b/test/sql/test_compiler.py index 33f84142bc8..99addb986d3 100644 --- a/test/sql/test_compiler.py +++ b/test/sql/test_compiler.py @@ -25,6 +25,7 @@ from sqlalchemy import Date from sqlalchemy import desc from sqlalchemy import distinct +from sqlalchemy import Enum from sqlalchemy import exc from sqlalchemy import except_ from sqlalchemy import exists @@ -96,6 +97,7 @@ from sqlalchemy.testing import is_true from sqlalchemy.testing import mock from sqlalchemy.testing import ne_ +from sqlalchemy.testing.schema import pep435_enum from sqlalchemy.util import u table1 = table( @@ -3655,6 +3657,46 @@ def test_binds(self): s, ) + def test_bind_param_escaping(self): + """general bind param escape unit tests added as a result of + #8053 + # + #""" + + SomeEnum = pep435_enum("SomeEnum") + one = SomeEnum("one", 1) + SomeEnum("two", 2) + + t = Table( + "t", + MetaData(), + Column("_id", Integer, primary_key=True), + Column("_data", Enum(SomeEnum)), + ) + + class MyCompiler(compiler.SQLCompiler): + def bindparam_string(self, name, **kw): + kw["escaped_from"] = name + return super(MyCompiler, self).bindparam_string( + '"%s"' % name, **kw + ) + + dialect = default.DefaultDialect() + dialect.statement_compiler = MyCompiler + + self.assert_compile( + t.insert(), + 'INSERT INTO t (_id, _data) VALUES (:"_id", :"_data")', + dialect=dialect, + ) + + compiled = t.insert().compile( + dialect=dialect, compile_kwargs=dict(compile_keys=("_id", "_data")) + ) + params = compiled.construct_params({"_id": 1, "_data": one}) + eq_(params, {'"_id"': 1, '"_data"': one}) + eq_(compiled._bind_processors, {'"_data"': mock.ANY}) + def test_expanding_non_expanding_conflict(self): """test #8018""" From ba0d68b99182871b0ef9d554fc9804ae434f573d Mon Sep 17 00:00:00 2001 From: Gord Thompson Date: Sun, 29 May 2022 07:07:45 -0600 Subject: [PATCH 247/632] mssql login failure if password starts with "{" Fix issue where a password with a leading "{" would result in login failure. 
Fixes: #8062 Change-Id: If91c2c211937b5eac89b8d525c22a19b0a94c5c4 (cherry picked from commit 8ac7cb92b4972a08b8008b80b34989694510139f) --- doc/build/changelog/unreleased_14/8062.rst | 5 ++ lib/sqlalchemy/connectors/pyodbc.py | 2 +- test/dialect/mssql/test_engine.py | 54 ++++++++++++++++------ 3 files changed, 45 insertions(+), 16 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8062.rst diff --git a/doc/build/changelog/unreleased_14/8062.rst b/doc/build/changelog/unreleased_14/8062.rst new file mode 100644 index 00000000000..ada473de9ca --- /dev/null +++ b/doc/build/changelog/unreleased_14/8062.rst @@ -0,0 +1,5 @@ +.. change:: + :tags: bug, mssql + :tickets: 8062 + + Fix issue where a password with a leading "{" would result in login failure. diff --git a/lib/sqlalchemy/connectors/pyodbc.py b/lib/sqlalchemy/connectors/pyodbc.py index 7a97aa16c78..9bb67b5113f 100644 --- a/lib/sqlalchemy/connectors/pyodbc.py +++ b/lib/sqlalchemy/connectors/pyodbc.py @@ -60,7 +60,7 @@ def create_connect_args(self, url): else: def check_quote(token): - if ";" in str(token): + if ";" in str(token) or str(token).startswith("{"): token = "{%s}" % token.replace("}", "}}") return token diff --git a/test/dialect/mssql/test_engine.py b/test/dialect/mssql/test_engine.py index 5482e261670..b5a04f1405b 100644 --- a/test/dialect/mssql/test_engine.py +++ b/test/dialect/mssql/test_engine.py @@ -234,25 +234,49 @@ def test_pyodbc_odbc_connect_ignores_other_values(self): connection, ) - def test_pyodbc_token_injection(self): - token1 = "someuser%3BPORT%3D50001" - token2 = "some{strange}pw%3BPORT%3D50001" - token3 = "somehost%3BPORT%3D50001" - token4 = "somedb%3BPORT%3D50001" - - u = url.make_url( - "mssql+pyodbc://%s:%s@%s/%s?driver=foob" - % (token1, token2, token3, token4) - ) - dialect = pyodbc.dialect() - connection = dialect.create_connect_args(u) - eq_( - [ + @testing.combinations( + ( + "original", + ( + "someuser%3BPORT%3D50001", + "some{strange}pw%3BPORT%3D50001", + 
"somehost%3BPORT%3D50001", + "somedb%3BPORT%3D50001", + ), + ( [ "DRIVER={foob};Server=somehost%3BPORT%3D50001;" "Database=somedb%3BPORT%3D50001;UID={someuser;PORT=50001};" "PWD={some{strange}}pw;PORT=50001}" - ], + ] + ), + ), + ( + "issue_8062", + ( + "larry", + "{moe", + "localhost", + "mydb", + ), + ( + [ + "DRIVER={foob};Server=localhost;" + "Database=mydb;UID=larry;" + "PWD={{moe}" + ] + ), + ), + argnames="tokens, connection_string", + id_="iaa", + ) + def test_pyodbc_token_injection(self, tokens, connection_string): + u = url.make_url("mssql+pyodbc://%s:%s@%s/%s?driver=foob" % tokens) + dialect = pyodbc.dialect() + connection = dialect.create_connect_args(u) + eq_( + [ + connection_string, {}, ], connection, From 03f886aaf5b6133ec637fb2cbf3c0e84240dd156 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 29 May 2022 12:07:46 -0400 Subject: [PATCH 248/632] move bindparam quote application from compiler to default in 296c84313ab29bf9599634f3 for #5653 we generalized Oracle's parameter escaping feature into the compiler, so that it could also work for PostgreSQL. The compiler used quoted names within parameter dictionaries, which then led to the complexity that all functions which interpreted keys from the compiled_params dict had to also quote the param names to use the dictionary. This extra complexity was not added to the ORM peristence.py however, which led to the versioning id feature being broken as well as other areas where persistence.py relies on naming schemes present in context.compiled_params. It also was not added to the "processors" lookup which led to #8053, that added this escaping to that part of the compiler. To both solve the whole problem as well as simplify the compiler quite a bit, move the actual application of the escaped names to be as late as possible, when default.py builds the final list of parameters. This is more similar to how it worked previously where OracleExecutionContext would be late-applying these escaped names. 
This re-establishes context.compiled_params as deterministically named regardless of dialect in use and moves out the complexity of the quoted param names to be only at the cursor.execute stage. Fixed bug, likely a regression from 1.3, where usage of column names that require bound parameter escaping, more concretely when using Oracle with column names that require quoting such as those that start with an underscore, or in less common cases with some PostgreSQL drivers when using column names that contain percent signs, would cause the ORM versioning feature to not work correctly if the versioning column itself had such a name, as the ORM assumes certain bound parameter naming conventions that were being interfered with via the quotes. This issue is related to :ticket:`8053` and essentially revises the approach towards fixing this, revising the original issue :ticket:`5653` that created the initial implementation for generalized bound-parameter name quoting. Fixes: #8056 Change-Id: I57b064e8f0d070e328b65789c30076f6a0ca0fef (cherry picked from commit a48b597d0cafa1dd7fc46be99eb808fd4cb0a347) --- doc/build/changelog/unreleased_14/8056.rst | 15 ++++++ lib/sqlalchemy/engine/default.py | 47 ++++++++++++++----- lib/sqlalchemy/sql/compiler.py | 53 +++++++--------------- lib/sqlalchemy/testing/fixtures.py | 4 ++ test/orm/test_versioning.py | 52 +++++++++++++++++++++ test/sql/test_compiler.py | 19 ++++++-- test/sql/test_external_traversal.py | 2 + 7 files changed, 139 insertions(+), 53 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8056.rst diff --git a/doc/build/changelog/unreleased_14/8056.rst b/doc/build/changelog/unreleased_14/8056.rst new file mode 100644 index 00000000000..a5a61fa3211 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8056.rst @@ -0,0 +1,15 @@ +.. 
change:: + :tags: bug, orm, oracle, postgresql + :tickets: 8056 + + Fixed bug, likely a regression from 1.3, where usage of column names that + require bound parameter escaping, more concretely when using Oracle with + column names that require quoting such as those that start with an + underscore, or in less common cases with some PostgreSQL drivers when using + column names that contain percent signs, would cause the ORM versioning + feature to not work correctly if the versioning column itself had such a + name, as the ORM assumes certain bound parameter naming conventions that + were being interfered with via the quotes. This issue is related to + :ticket:`8053` and essentially revises the approach towards fixing this, + revising the original issue :ticket:`5653` that created the initial + implementation for generalized bound-parameter name quoting. diff --git a/lib/sqlalchemy/engine/default.py b/lib/sqlalchemy/engine/default.py index 5a1443ecbc1..cc0844e1c3f 100644 --- a/lib/sqlalchemy/engine/default.py +++ b/lib/sqlalchemy/engine/default.py @@ -1079,21 +1079,44 @@ def _init_compiled( if encode: encoder = dialect._encoder for compiled_params in self.compiled_parameters: + escaped_bind_names = compiled.escaped_bind_names if encode: - param = { - encoder(key)[0]: processors[key](compiled_params[key]) - if key in processors - else compiled_params[key] - for key in compiled_params - } + if escaped_bind_names: + param = { + encoder(escaped_bind_names.get(key, key))[ + 0 + ]: processors[key](compiled_params[key]) + if key in processors + else compiled_params[key] + for key in compiled_params + } + else: + param = { + encoder(key)[0]: processors[key]( + compiled_params[key] + ) + if key in processors + else compiled_params[key] + for key in compiled_params + } else: - param = { - key: processors[key](compiled_params[key]) - if key in processors - else compiled_params[key] - for key in compiled_params - } + if escaped_bind_names: + param = { + 
escaped_bind_names.get(key, key): processors[key]( + compiled_params[key] + ) + if key in processors + else compiled_params[key] + for key in compiled_params + } + else: + param = { + key: processors[key](compiled_params[key]) + if key in processors + else compiled_params[key] + for key in compiled_params + } parameters.append(param) diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index fa158863da9..2f3033d7058 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -898,14 +898,10 @@ def _apply_numbered_params(self): @util.memoized_property def _bind_processors(self): - _escaped_bind_names = self.escaped_bind_names - has_escaped_names = bool(_escaped_bind_names) return dict( ( - _escaped_bind_names.get(key, key) - if has_escaped_names - else key, + key, value, ) for key, value in ( @@ -939,8 +935,6 @@ def construct_params( ): """return a dictionary of bind parameter keys and values""" - has_escaped_names = bool(self.escaped_bind_names) - if extracted_parameters: # related the bound parameters collected in the original cache key # to those collected in the incoming cache key. 
They will not have @@ -971,16 +965,10 @@ def construct_params( if params: pd = {} for bindparam, name in self.bind_names.items(): - escaped_name = ( - self.escaped_bind_names.get(name, name) - if has_escaped_names - else name - ) - if bindparam.key in params: - pd[escaped_name] = params[bindparam.key] + pd[name] = params[bindparam.key] elif name in params: - pd[escaped_name] = params[name] + pd[name] = params[name] elif _check and bindparam.required: if _group_number: @@ -1005,19 +993,13 @@ def construct_params( value_param = bindparam if bindparam.callable: - pd[escaped_name] = value_param.effective_value + pd[name] = value_param.effective_value else: - pd[escaped_name] = value_param.value + pd[name] = value_param.value return pd else: pd = {} for bindparam, name in self.bind_names.items(): - escaped_name = ( - self.escaped_bind_names.get(name, name) - if has_escaped_names - else name - ) - if _check and bindparam.required: if _group_number: raise exc.InvalidRequestError( @@ -1039,9 +1021,9 @@ def construct_params( value_param = bindparam if bindparam.callable: - pd[escaped_name] = value_param.effective_value + pd[name] = value_param.effective_value else: - pd[escaped_name] = value_param.value + pd[name] = value_param.value return pd @util.memoized_instancemethod @@ -1139,6 +1121,7 @@ def _process_parameters_for_postcompile( N as a bound parameter. """ + if parameters is None: parameters = self.construct_params() @@ -1181,10 +1164,11 @@ def _process_parameters_for_postcompile( if self.escaped_bind_names else name ) + parameter = self.binds[name] if parameter in self.literal_execute_params: if escaped_name not in replacement_expressions: - value = parameters.pop(escaped_name) + value = parameters.pop(name) replacement_expressions[ escaped_name @@ -1203,7 +1187,12 @@ def _process_parameters_for_postcompile( # process it. the single name is being replaced with # individual numbered parameters for each value in the # param. 
- values = parameters.pop(escaped_name) + # + # note we are also inserting *escaped* parameter names + # into the given dictionary. default dialect will + # use these param names directly as they will not be + # in the escaped_bind_names dictionary. + values = parameters.pop(name) leep = self._literal_execute_expanding_parameter to_update, replacement_expr = leep( @@ -1301,15 +1290,7 @@ def _create_result_map(self): @util.memoized_property def _within_exec_param_key_getter(self): getter = self._key_getters_for_crud_column[2] - if self.escaped_bind_names: - - def _get(obj): - key = getter(obj) - return self.escaped_bind_names.get(key, key) - - return _get - else: - return getter + return getter @util.memoized_property @util.preload_module("sqlalchemy.engine.result") diff --git a/lib/sqlalchemy/testing/fixtures.py b/lib/sqlalchemy/testing/fixtures.py index 8c2e9d8de6c..3f8f749bfe8 100644 --- a/lib/sqlalchemy/testing/fixtures.py +++ b/lib/sqlalchemy/testing/fixtures.py @@ -138,6 +138,10 @@ def go(**kw): return go + @config.fixture + def fixture_session(self): + return fixture_session() + @config.fixture() def metadata(self, request): """Provide bound MetaData for a single test, dropping afterwards.""" diff --git a/test/orm/test_versioning.py b/test/orm/test_versioning.py index ce01cace7f9..30730122410 100644 --- a/test/orm/test_versioning.py +++ b/test/orm/test_versioning.py @@ -2006,3 +2006,55 @@ def test_explicit_assign_from_expired(self): f1.value = "f2" f1.version_id = 2 s1.flush() + + +class QuotedBindVersioningTest(fixtures.MappedTest): + """test for #8056""" + + __backend__ = True + + @classmethod + def define_tables(cls, metadata): + Table( + "version_table", + metadata, + Column( + "id", Integer, primary_key=True, test_needs_autoincrement=True + ), + # will need parameter quoting for Oracle and PostgreSQL + # don't use 'key' to make sure the awkward name is definitely + # in the params + Column("_version%id", Integer, nullable=False), + Column("value", 
String(40), nullable=False), + ) + + @classmethod + def setup_classes(cls): + class Foo(cls.Basic): + pass + + @classmethod + def setup_mappers(cls): + Foo = cls.classes.Foo + vt = cls.tables.version_table + cls.mapper_registry.map_imperatively( + Foo, + vt, + version_id_col=vt.c["_version%id"], + properties={"version": vt.c["_version%id"]}, + ) + + def test_round_trip(self, fixture_session): + Foo = self.classes.Foo + + f1 = Foo(value="v1") + fixture_session.add(f1) + fixture_session.commit() + + f1.value = "v2" + with conditional_sane_rowcount_warnings( + update=True, only_returning=True + ): + fixture_session.commit() + + eq_(f1.version, 2) diff --git a/test/sql/test_compiler.py b/test/sql/test_compiler.py index 99addb986d3..250e8b30cf9 100644 --- a/test/sql/test_compiler.py +++ b/test/sql/test_compiler.py @@ -3659,9 +3659,13 @@ def test_binds(self): def test_bind_param_escaping(self): """general bind param escape unit tests added as a result of - #8053 - # - #""" + #8053. + + However, note that the final application of an escaped param name + was moved out of compiler and into DefaultExecutionContext in + related issue #8056. 
+ + """ SomeEnum = pep435_enum("SomeEnum") one = SomeEnum("one", 1) @@ -3694,8 +3698,13 @@ def bindparam_string(self, name, **kw): dialect=dialect, compile_kwargs=dict(compile_keys=("_id", "_data")) ) params = compiled.construct_params({"_id": 1, "_data": one}) - eq_(params, {'"_id"': 1, '"_data"': one}) - eq_(compiled._bind_processors, {'"_data"': mock.ANY}) + + eq_(params, {"_id": 1, "_data": one}) + eq_(compiled._bind_processors, {"_data": mock.ANY}) + + # previously, this was: + # eq_(params, {'"_id"': 1, '"_data"': one}) + # eq_(compiled._bind_processors, {'"_data"': mock.ANY}) def test_expanding_non_expanding_conflict(self): """test #8018""" diff --git a/test/sql/test_external_traversal.py b/test/sql/test_external_traversal.py index c14b8b4c68b..1695771486a 100644 --- a/test/sql/test_external_traversal.py +++ b/test/sql/test_external_traversal.py @@ -196,6 +196,8 @@ def visit_grouping(self, elem): def test_bindparam_key_proc_for_copies(self, meth, name): r"""test :ticket:`6249`. + Revised for :ticket:`8056`. + The key of the bindparam needs spaces and other characters escaped out for the POSTCOMPILE regex to work correctly. From e247b3450945a46eff1a0c96de8d46384ac1d644 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 27 May 2022 16:07:01 -0400 Subject: [PATCH 249/632] remove "deannotate" from column_property expression Fixed issue where using a :func:`_orm.column_property` construct containing a subquery against an already-mapped column attribute would not correctly apply ORM-compilation behaviors to the subquery, including that the "IN" expression added for a single-table inherits expression would fail to be included. This fix involves a few tweaks in the ORM adaptation logic, including a missing "parententity" adaptation on the mapper side. The specific mechanics here have a lot of moving parts so we will continue to add tests to assert these cases. 
In particular a more complete test for issue #2316 is added that was relying upon the deannotate happening here. Fixes: #8064 Change-Id: Ia85dd12dcf6e7c002b30de4a27d7aa66cb3cd20e (cherry picked from commit 8c5cc8ae255a7580e2ff339659cf66cd2c6e02c1) --- doc/build/changelog/unreleased_14/8064.rst | 9 ++++++ lib/sqlalchemy/orm/mapper.py | 8 +++--- lib/sqlalchemy/orm/properties.py | 10 +------ lib/sqlalchemy/orm/util.py | 2 +- lib/sqlalchemy/testing/fixtures.py | 4 +++ test/orm/inheritance/test_single.py | 33 ++++++++++++++++++++++ test/orm/test_eager_relations.py | 31 ++++++++++++++++++++ test/orm/test_mapper.py | 27 ++++++++++++------ 8 files changed, 101 insertions(+), 23 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8064.rst diff --git a/doc/build/changelog/unreleased_14/8064.rst b/doc/build/changelog/unreleased_14/8064.rst new file mode 100644 index 00000000000..ccac2ad03db --- /dev/null +++ b/doc/build/changelog/unreleased_14/8064.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, orm + :tickets: 8064 + + Fixed issue where using a :func:`_orm.column_property` construct containing + a subquery against an already-mapped column attribute would not correctly + apply ORM-compilation behaviors to the subquery, including that the "IN" + expression added for a single-table inherits expression would fail to be + included. 
diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index ad68820125f..21809e7e6ea 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -1755,7 +1755,7 @@ def _configure_property(self, key, prop, init=True, setparent=True): col.key = col._tq_key_label = key self.columns.add(col, key) - for col in prop.columns + prop._orig_columns: + for col in prop.columns: for col in col.proxy_set: self._columntoproperty[col] = prop @@ -2091,9 +2091,9 @@ def _selectable_from_mappers(self, mappers, innerjoin): @HasMemoized.memoized_attribute def _single_table_criterion(self): if self.single and self.inherits and self.polymorphic_on is not None: - return self.polymorphic_on._annotate({"parentmapper": self}).in_( - m.polymorphic_identity for m in self.self_and_descendants - ) + return self.polymorphic_on._annotate( + {"parententity": self, "parentmapper": self} + ).in_(m.polymorphic_identity for m in self.self_and_descendants) else: return None diff --git a/lib/sqlalchemy/orm/properties.py b/lib/sqlalchemy/orm/properties.py index b5ac9b87945..d32af17464c 100644 --- a/lib/sqlalchemy/orm/properties.py +++ b/lib/sqlalchemy/orm/properties.py @@ -20,7 +20,6 @@ from .interfaces import PropComparator from .interfaces import StrategizedProperty from .relationships import RelationshipProperty -from .util import _orm_full_deannotate from .. import log from .. 
import util from ..sql import coercions @@ -49,7 +48,6 @@ class ColumnProperty(StrategizedProperty): _links_to_entity = False __slots__ = ( - "_orig_columns", "columns", "group", "deferred", @@ -155,14 +153,8 @@ def __init__(self, *columns, **kwargs): """ super(ColumnProperty, self).__init__() - self._orig_columns = [ - coercions.expect(roles.LabeledColumnExprRole, c) for c in columns - ] self.columns = [ - coercions.expect( - roles.LabeledColumnExprRole, _orm_full_deannotate(c) - ) - for c in columns + coercions.expect(roles.LabeledColumnExprRole, c) for c in columns ] self.group = kwargs.pop("group", None) self.deferred = kwargs.pop("deferred", False) diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index 7f72c1fc086..4afcd0fb862 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -415,7 +415,7 @@ def __init__( def _include_fn(self, elem): entity = elem._annotations.get("parentmapper", None) - return not entity or entity.isa(self.mapper) + return not entity or entity.common_parent(self.mapper) class AliasedClass(object): diff --git a/lib/sqlalchemy/testing/fixtures.py b/lib/sqlalchemy/testing/fixtures.py index 8c2e9d8de6c..35fcabaab1e 100644 --- a/lib/sqlalchemy/testing/fixtures.py +++ b/lib/sqlalchemy/testing/fixtures.py @@ -91,6 +91,10 @@ def registry(self, metadata): yield reg reg.dispose() + @config.fixture + def decl_base(self, registry): + return registry.generate_base() + @config.fixture() def future_connection(self, future_engine, connection): # integrate the future_engine and connection fixtures so diff --git a/test/orm/inheritance/test_single.py b/test/orm/inheritance/test_single.py index fbafdd85be7..6f611eb3ad6 100644 --- a/test/orm/inheritance/test_single.py +++ b/test/orm/inheritance/test_single.py @@ -13,6 +13,7 @@ from sqlalchemy import util from sqlalchemy.orm import aliased from sqlalchemy.orm import Bundle +from sqlalchemy.orm import column_property from sqlalchemy.orm import joinedload from 
sqlalchemy.orm import relationship from sqlalchemy.orm import Session @@ -1979,3 +1980,35 @@ def setup_classes(cls): super(EagerDefaultEvalTestPolymorphic, cls).setup_classes( with_polymorphic="*" ) + + +class ColExprTest(AssertsCompiledSQL, fixtures.TestBase): + def test_discrim_on_column_prop(self, registry): + Base = registry.generate_base() + + class Employee(Base): + __tablename__ = "employee" + id = Column(Integer, primary_key=True) + type = Column(String(20)) + + __mapper_args__ = { + "polymorphic_on": "type", + "polymorphic_identity": "employee", + } + + class Engineer(Employee): + __mapper_args__ = {"polymorphic_identity": "engineer"} + + class Company(Base): + __tablename__ = "company" + id = Column(Integer, primary_key=True) + + max_engineer_id = column_property( + select(func.max(Engineer.id)).scalar_subquery() + ) + + self.assert_compile( + select(Company.max_engineer_id), + "SELECT (SELECT max(employee.id) AS max_1 FROM employee " + "WHERE employee.type IN (__[POSTCOMPILE_type_1])) AS anon_1", + ) diff --git a/test/orm/test_eager_relations.py b/test/orm/test_eager_relations.py index 879cc2b8172..b2a5ed33f39 100644 --- a/test/orm/test_eager_relations.py +++ b/test/orm/test_eager_relations.py @@ -179,6 +179,37 @@ def go(): # has to lazy load the addresses self.assert_sql_count(testing.db, go, 1) + def test_column_property_adaptation(self, decl_base): + """test #2316 in support of #8064""" + + class A(decl_base): + __tablename__ = "a" + id = Column(Integer, primary_key=True) + type = Column(String(40), nullable=False) + __mapper_args__ = {"polymorphic_on": type} + + A.anything = column_property(A.id + 1000) + + class B(A): + __tablename__ = "b" + account_id = Column(Integer, ForeignKey("a.id"), primary_key=True) + x_id = Column(Integer, ForeignKey("x.id"), nullable=False) + __mapper_args__ = {"polymorphic_identity": "named"} + + class X(decl_base): + __tablename__ = "x" + id = Column(Integer, primary_key=True) + b = relationship("B") + + 
self.assert_compile( + select(X).options(joinedload(X.b)), + "SELECT x.id, a_1.id AS id_1, a_1.type, a_1.id + :id_2 AS anon_1, " + "b_1.account_id, b_1.x_id FROM x " + "LEFT OUTER JOIN " + "(a AS a_1 JOIN b AS b_1 ON a_1.id = b_1.account_id) " + "ON x.id = b_1.x_id", + ) + def test_no_render_in_subquery(self): """test #6378""" diff --git a/test/orm/test_mapper.py b/test/orm/test_mapper.py index 9e2a7f63a66..c8a87cf5b7d 100644 --- a/test/orm/test_mapper.py +++ b/test/orm/test_mapper.py @@ -846,7 +846,8 @@ def test_replace_rel_prop_with_rel_warns(self): ) @testing.combinations((True,), (False,)) - def test_add_column_prop_deannotate(self, autoalias): + def test_add_column_prop_adaption(self, autoalias): + """test ultimately from #2316 revised for #8064""" User, users = self.classes.User, self.tables.users Address, addresses = self.classes.Address, self.tables.addresses @@ -907,9 +908,13 @@ class SubUser(User): "users_1.id = addresses.user_id", ) - def test_column_prop_deannotate(self): - """test that column property deannotates, - bringing expressions down to the original mapped columns. + def test_column_prop_stays_annotated(self): + """test ultimately from #2316 revised for #8064. + + previously column_property() would deannotate the given expression, + however this interfered with some compilation scenarios. + + """ User, users = self.classes.User, self.tables.users m = self.mapper(User, users) @@ -921,14 +926,18 @@ def test_column_prop_deannotate(self): m.add_property("y", column_property(expr2.scalar_subquery())) assert User.x.property.columns[0] is not expr - assert User.x.property.columns[0].element.left is users.c.name - # a deannotate needs to clone the base, in case - # the original one referenced annotated elements. 
- assert User.x.property.columns[0].element.right is not expr.right + + assert ( + User.x.property.columns[0].element.left + is User.name.comparator.expr + ) + + assert User.x.property.columns[0].element.right is expr.right assert User.y.property.columns[0] is not expr2 assert ( - User.y.property.columns[0].element._raw_columns[0] is users.c.name + User.y.property.columns[0].element._raw_columns[0] + is User.name.expression ) assert User.y.property.columns[0].element._raw_columns[1] is users.c.id From 6a9f23038ea8ace5c7fb2c323f2ee38a15fa07dd Mon Sep 17 00:00:00 2001 From: Josep Pascual Badia Date: Mon, 30 May 2022 10:41:55 -0400 Subject: [PATCH 250/632] repair incorrect "cursor" var name in connection faq Closes: #8075 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/8075 Pull-request-sha: 34e5eaf3870f89c9c38ffe81121fa1b42e363752 Change-Id: Iab8cdc9f8da68ac955eea75efeba263d0a9dcb7b (cherry picked from commit d97de97eff21af3bdacffc2b625feb7e0bd6c18c) --- doc/build/faq/connections.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/build/faq/connections.rst b/doc/build/faq/connections.rst index 1bee24c3247..504c47485b6 100644 --- a/doc/build/faq/connections.rst +++ b/doc/build/faq/connections.rst @@ -255,14 +255,14 @@ statement executions:: def reconnecting_engine(engine, num_retries, retry_interval): - def _run_with_retries(fn, context, cursor, statement, *arg, **kw): + def _run_with_retries(fn, context, cursor_obj, statement, *arg, **kw): for retry in range(num_retries + 1): try: - fn(cursor, statement, context=context, *arg) + fn(cursor_obj, statement, context=context, *arg) except engine.dialect.dbapi.Error as raw_dbapi_err: connection = context.root_connection if engine.dialect.is_disconnect( - raw_dbapi_err, connection, cursor + raw_dbapi_err, connection, cursor_obj ): if retry > num_retries: raise From 8564e2abf97795819f655a70b19b3bc820729c79 Mon Sep 17 00:00:00 2001 From: Christopher Jones Date: Tue, 31 May 2022 08:27:18 
-0400 Subject: [PATCH 251/632] Handle dead-connection errors for users of python-oracledb Added two new error codes for Oracle disconnect handling to support early testing of the new "python-oracledb" driver released by Oracle. Fixes: #8066 Closes: #8065 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/8065 Pull-request-sha: d630b8457a1d29b7a1354ccc6d5e2956eea865f6 Change-Id: Ib14dbb888597b1087b1bb7c505ccad59df226177 (cherry picked from commit 2bf00472bfafd8fd0cca5b4fe55ff4faf1a1279e) --- doc/build/changelog/unreleased_14/8066.rst | 6 ++++++ lib/sqlalchemy/dialects/oracle/cx_oracle.py | 5 ++++- 2 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/8066.rst diff --git a/doc/build/changelog/unreleased_14/8066.rst b/doc/build/changelog/unreleased_14/8066.rst new file mode 100644 index 00000000000..5f814ab3ab1 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8066.rst @@ -0,0 +1,6 @@ +.. change:: + :tags: usecase, oracle + :tickets: 8066 + + Added two new error codes for Oracle disconnect handling to support early + testing of the new "python-oracledb" driver released by Oracle. diff --git a/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/lib/sqlalchemy/dialects/oracle/cx_oracle.py index 4c89ed7355d..64029a47966 100644 --- a/lib/sqlalchemy/dialects/oracle/cx_oracle.py +++ b/lib/sqlalchemy/dialects/oracle/cx_oracle.py @@ -1335,9 +1335,12 @@ def is_disconnect(self, e, connection, cursor): # TODO: Others ? return True - if re.match(r"^(?:DPI-1010|DPI-1080)", str(e)): + if re.match(r"^(?:DPI-1010|DPI-1080|DPY-1001|DPY-4011)", str(e)): # DPI-1010: not connected # DPI-1080: connection was closed by ORA-3113 + # python-oracledb's DPY-1001: not connected to database + # python-oracledb's DPY-4011: the database or network closed the + # connection # TODO: others? 
return True From 96d40575ec612aaf00695f113dbcfe95f8cc80b4 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 31 May 2022 10:48:16 -0400 Subject: [PATCH 252/632] raise informative error when selectable can't be extended An informative error is raised for the use case where :meth:`.Insert.from_select` is being passed a "compound select" object such as a UNION, yet the INSERT statement needs to append additional columns to support Python-side or explicit SQL defaults from the table metadata. In this case a subquery of the compound object should be passed. Fixes: #8073 Change-Id: Ic4a5dbf84ec49d2451901be05cb9cf6ae93f02b7 (cherry picked from commit 7474df2159f89d684d32aabb15014ef95cea1641) --- doc/build/changelog/unreleased_14/8073.rst | 9 +++ lib/sqlalchemy/sql/crud.py | 17 +++++- test/sql/test_insert.py | 70 ++++++++++++++++++++++ 3 files changed, 93 insertions(+), 3 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8073.rst diff --git a/doc/build/changelog/unreleased_14/8073.rst b/doc/build/changelog/unreleased_14/8073.rst new file mode 100644 index 00000000000..57add15b81b --- /dev/null +++ b/doc/build/changelog/unreleased_14/8073.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, sql + :tickets: 8073 + + An informative error is raised for the use case where + :meth:`.Insert.from_select` is being passed a "compound select" object such + as a UNION, yet the INSERT statement needs to append additional columns to + support Python-side or explicit SQL defaults from the table metadata. In + this case a subquery of the compound object should be passed. diff --git a/lib/sqlalchemy/sql/crud.py b/lib/sqlalchemy/sql/crud.py index 804777c29ee..920c8b35687 100644 --- a/lib/sqlalchemy/sql/crud.py +++ b/lib/sqlalchemy/sql/crud.py @@ -16,6 +16,7 @@ from . import dml from . import elements from . import roles +from .selectable import Select from .. import exc from .. 
import util @@ -339,10 +340,20 @@ def _scan_insert_from_select_cols( if add_select_cols: values.extend(add_select_cols) ins_from_select = compiler.stack[-1]["insert_from_select"] + if not isinstance(ins_from_select, Select): + raise exc.CompileError( + "Can't extend statement for INSERT..FROM SELECT to include " + "additional default-holding column(s) " + "%s. Convert the selectable to a subquery() first, or pass " + "include_defaults=False to Insert.from_select() to skip these " + "columns." + % (", ".join(repr(key) for _, key, _ in add_select_cols),) + ) ins_from_select = ins_from_select._generate() - ins_from_select._raw_columns = tuple( - ins_from_select._raw_columns - ) + tuple(expr for col, col_expr, expr in add_select_cols) + # copy raw_columns + ins_from_select._raw_columns = list(ins_from_select._raw_columns) + [ + expr for col, col_expr, expr in add_select_cols + ] compiler.stack[-1]["insert_from_select"] = ins_from_select diff --git a/test/sql/test_insert.py b/test/sql/test_insert.py index 51045daac22..741859fb2cf 100644 --- a/test/sql/test_insert.py +++ b/test/sql/test_insert.py @@ -24,6 +24,7 @@ from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import expect_warnings from sqlalchemy.testing import fixtures @@ -662,6 +663,75 @@ def foo(ctx): checkparams={"name_1": "foo", "foo": None}, ) + def test_insert_from_select_fn_defaults_compound(self): + """test #8073""" + + metadata = MetaData() + + table = Table( + "sometable", + metadata, + Column("id", Integer, primary_key=True), + Column("foo", Integer, default="foo"), + Column("bar", Integer, default="bar"), + ) + table1 = self.tables.mytable + sel = ( + select(table1.c.myid) + .where(table1.c.name == "foo") + .union(select(table1.c.myid).where(table1.c.name == "foo")) + ) + ins = table.insert().from_select(["id"], sel) + with 
expect_raises_message( + exc.CompileError, + r"Can't extend statement for INSERT..FROM SELECT to include " + r"additional default-holding column\(s\) 'foo', 'bar'. " + r"Convert the selectable to a subquery\(\) first, or pass " + r"include_defaults=False to Insert.from_select\(\) to skip these " + r"columns.", + ): + ins.compile() + + def test_insert_from_select_fn_defaults_compound_subquery(self): + """test #8073""" + + metadata = MetaData() + + def foo(ctx): + return 12 + + table = Table( + "sometable", + metadata, + Column("id", Integer, primary_key=True), + Column("foo", Integer, default="foo"), + Column("bar", Integer, default="bar"), + ) + table1 = self.tables.mytable + sel = ( + select(table1.c.myid) + .where(table1.c.name == "foo") + .union(select(table1.c.myid).where(table1.c.name == "foo")) + .subquery() + ) + + ins = table.insert().from_select(["id"], sel) + self.assert_compile( + ins, + "INSERT INTO sometable (id, foo, bar) SELECT anon_1.myid, " + ":foo AS anon_2, :bar AS anon_3 FROM " + "(SELECT mytable.myid AS myid FROM mytable " + "WHERE mytable.name = :name_1 UNION " + "SELECT mytable.myid AS myid FROM mytable " + "WHERE mytable.name = :name_2) AS anon_1", + checkparams={ + "foo": None, + "bar": None, + "name_1": "foo", + "name_2": "foo", + }, + ) + def test_insert_from_select_dont_mutate_raw_columns(self): # test [ticket:3603] from sqlalchemy import table From 1d77afcfc46cb15a6b2e3317f985df4e60a70286 Mon Sep 17 00:00:00 2001 From: Patrick Gerken Date: Tue, 31 May 2022 21:42:35 +0200 Subject: [PATCH 253/632] Update declarative_styles.rst: Update code example to attrs TNG usage. (#8072) * Update declarative_styles.rst Update docs to new style usage of attrs. This is the default since December 2021. While the old style still works, the newer one looks much nicer and is likely to be dominant pretty quickly. Imho. 
* Update declarative_styles.rst * Update declarative_styles.rst (cherry picked from commit 1e1431fbfb4992f8d85d3fd42ec06975e7f762f0) --- doc/build/orm/declarative_styles.rst | 35 ++++++++++++++++------------ 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/doc/build/orm/declarative_styles.rst b/doc/build/orm/declarative_styles.rst index 24d9eec5ecd..dd72e9c1a0c 100644 --- a/doc/build/orm/declarative_styles.rst +++ b/doc/build/orm/declarative_styles.rst @@ -197,19 +197,20 @@ Declarative Mapping with Dataclasses and Attrs The dataclasses_ module, added in Python 3.7, provides a ``@dataclass`` class decorator to automatically generate boilerplate definitions of ``__init__()``, ``__eq__()``, ``__repr()__``, etc. methods. Another very popular library that does -the same, and much more, is attrs_. Both libraries make use of class -decorators in order to scan a class for attributes that define the class' -behavior, which are then used to generate methods, documentation, and annotations. +the same, and much more, is attrs_, which uses the ``@define`` decorator. +Both libraries make use of class decorators in order to scan a class for +attributes that define the class' behavior, which are then used to generate +methods, documentation, and annotations. The :meth:`_orm.registry.mapped` class decorator allows the declarative mapping of a class to occur after the class has been fully constructed, allowing the class to be processed by other class decorators first. The ``@dataclass`` -and ``@attr.s`` decorators may therefore be applied first before the +and ``@define`` decorators may therefore be applied first before the ORM mapping process proceeds via the :meth:`_orm.registry.mapped` decorator or via the :meth:`_orm.registry.map_imperatively` method discussed in a later section. 
-Mapping with ``@dataclass`` or ``@attr.s`` may be used in a straightforward +Mapping with ``@dataclass`` or ``@define`` may be used in a straightforward way with :ref:`orm_imperative_table_configuration` style, where the the :class:`_schema.Table`, which means that it is defined separately and associated with the class via the ``__table__``. For dataclasses specifically, @@ -225,9 +226,13 @@ is to be mapped to a :class:`_schema.Column`, checks explicitly if the attribute is part of a Dataclasses setup, and if so will **replace** the class-bound dataclass attribute with its usual mapped properties. The ``__init__`` method created by ``@dataclass`` is left -intact. In contrast, the ``@attr.s`` decorator actually removes its -own class-bound attributes after the decorator runs, so that SQLAlchemy's -mapping process takes over these attributes without any issue. +intact. The ``@define`` decorator of attrs_ by default replaces the annotated class +with a new __slots__ based class, which is not supported. When using the old +style annotation ``@attr.s`` or using ``define(slots=False)``, the class +does not get replaced. Furthermore attrs removes its own class-bound attributes +after the decorator runs, so that SQLAlchemy's mapping process takes over these +attributes without any issue. Both decorators, ``@attr.s`` and ``@define(slots=False)`` +work with SQLAlchemy. .. 
versionadded:: 1.4 Added support for direct mapping of Python dataclasses, where the :class:`_orm.Mapper` will now detect attributes that are specific @@ -440,7 +445,7 @@ came from a mixin that is itself a dataclass, the form would be:: Example Three - attrs with Imperative Table ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -A mapping using ``@attr.s``, in conjunction with imperative table:: +A mapping using ``@define`` from attrs_, in conjunction with imperative table:: import attr from sqlalchemy.orm import registry @@ -452,7 +457,7 @@ A mapping using ``@attr.s``, in conjunction with imperative table:: @mapper_registry.mapped - @attr.s + @define(slots=False) class User: __table__ = Table( "user", @@ -462,11 +467,11 @@ A mapping using ``@attr.s``, in conjunction with imperative table:: Column("fullname", String(50)), Column("nickname", String(12)), ) - id = attr.ib() - name = attr.ib() - fullname = attr.ib() - nickname = attr.ib() - addresses = attr.ib() + id: int + name: str + fullname: str + nickname: str + addresses: List[Address] # other classes... 
From c2366fa222b3dc6c9373c8a0c659c61a8c943f94 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 31 May 2022 17:21:39 -0400 Subject: [PATCH 254/632] changelog fixes Change-Id: Id3fc3019f8eb799aa5adf6ca28e0aeedc0da31dd (cherry picked from commit 1e8e4ace63f93327e5c7e000acb6c0efb595eed1) --- doc/build/changelog/unreleased_14/7979.rst | 4 ++-- doc/build/changelog/unreleased_14/8014.rst | 15 ++++++++------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/doc/build/changelog/unreleased_14/7979.rst b/doc/build/changelog/unreleased_14/7979.rst index 9a82a290979..e4aec5c403b 100644 --- a/doc/build/changelog/unreleased_14/7979.rst +++ b/doc/build/changelog/unreleased_14/7979.rst @@ -4,6 +4,6 @@ Fixed an issue where using :func:`.bindparam` with no explicit data or type given could be coerced into the incorrect type when used in expressions - such as when using :meth:`.ARRAY.comparator.any` and - :meth:`.ARRAY.comparator.all`. + such as when using :meth:`.ARRAY.Comparator.any` and + :meth:`.ARRAY.Comparator.all`. diff --git a/doc/build/changelog/unreleased_14/8014.rst b/doc/build/changelog/unreleased_14/8014.rst index 331a9577c58..168100bb224 100644 --- a/doc/build/changelog/unreleased_14/8014.rst +++ b/doc/build/changelog/unreleased_14/8014.rst @@ -2,10 +2,11 @@ :tags: bug, sql, postgresql, sqlite :tickets: 8014 - Fixed bug where the PostgreSQL :meth:`_postgresql.Insert.on_conflict` - method and the SQLite :meth:`_sqlite.Insert.on_conflict` method would both - fail to correctly accommodate a column with a separate ".key" when - specifying the column using its key name in the dictionary passed to - ``set_``, as well as if the :attr:`_sqlite.Insert.excluded` or - :attr:`_postgresql.Insert.excluded` collection were used as the dictionary - directly. 
+ Fixed bug where the PostgreSQL + :meth:`_postgresql.Insert.on_conflict_do_update` method and the SQLite + :meth:`_sqlite.Insert.on_conflict_do_update` method would both fail to + correctly accommodate a column with a separate ".key" when specifying the + column using its key name in the dictionary passed to + :paramref:`_postgresql.Insert.on_conflict_do_update.set_`, as well as if + the :attr:`_postgresql.Insert.excluded` collection were used as the + dictionary directly. From e347aa81a593252b6ee4f84a411f62f526ba0c77 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 31 May 2022 17:23:09 -0400 Subject: [PATCH 255/632] - 1.4.37 --- doc/build/changelog/changelog_14.rst | 139 ++++++++++++++++++++- doc/build/changelog/unreleased_14/7966.rst | 7 -- doc/build/changelog/unreleased_14/7979.rst | 9 -- doc/build/changelog/unreleased_14/8001.rst | 8 -- doc/build/changelog/unreleased_14/8014.rst | 12 -- doc/build/changelog/unreleased_14/8018.rst | 11 -- doc/build/changelog/unreleased_14/8019.rst | 7 -- doc/build/changelog/unreleased_14/8035.rst | 6 - doc/build/changelog/unreleased_14/8036.rst | 8 -- doc/build/changelog/unreleased_14/8053.rst | 11 -- doc/build/changelog/unreleased_14/8056.rst | 15 --- doc/build/changelog/unreleased_14/8062.rst | 5 - doc/build/changelog/unreleased_14/8064.rst | 9 -- doc/build/changelog/unreleased_14/8066.rst | 6 - doc/build/changelog/unreleased_14/8073.rst | 9 -- doc/build/conf.py | 4 +- 16 files changed, 140 insertions(+), 126 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/7966.rst delete mode 100644 doc/build/changelog/unreleased_14/7979.rst delete mode 100644 doc/build/changelog/unreleased_14/8001.rst delete mode 100644 doc/build/changelog/unreleased_14/8014.rst delete mode 100644 doc/build/changelog/unreleased_14/8018.rst delete mode 100644 doc/build/changelog/unreleased_14/8019.rst delete mode 100644 doc/build/changelog/unreleased_14/8035.rst delete mode 100644 doc/build/changelog/unreleased_14/8036.rst delete mode 
100644 doc/build/changelog/unreleased_14/8053.rst delete mode 100644 doc/build/changelog/unreleased_14/8056.rst delete mode 100644 doc/build/changelog/unreleased_14/8062.rst delete mode 100644 doc/build/changelog/unreleased_14/8064.rst delete mode 100644 doc/build/changelog/unreleased_14/8066.rst delete mode 100644 doc/build/changelog/unreleased_14/8073.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 82022c929d6..1c043fef4e9 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,144 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.37 - :include_notes_from: unreleased_14 + :released: May 31, 2022 + + .. change:: + :tags: bug, mssql + :tickets: 8062 + + Fix issue where a password with a leading "{" would result in login failure. + + .. change:: + :tags: bug, sql, postgresql, sqlite + :tickets: 8014 + + Fixed bug where the PostgreSQL + :meth:`_postgresql.Insert.on_conflict_do_update` method and the SQLite + :meth:`_sqlite.Insert.on_conflict_do_update` method would both fail to + correctly accommodate a column with a separate ".key" when specifying the + column using its key name in the dictionary passed to + :paramref:`_postgresql.Insert.on_conflict_do_update.set_`, as well as if + the :attr:`_postgresql.Insert.excluded` collection were used as the + dictionary directly. + + .. change:: + :tags: bug, sql + :tickets: 8073 + + An informative error is raised for the use case where + :meth:`.Insert.from_select` is being passed a "compound select" object such + as a UNION, yet the INSERT statement needs to append additional columns to + support Python-side or explicit SQL defaults from the table metadata. In + this case a subquery of the compound object should be passed. + + .. 
change:: + :tags: bug, orm + :tickets: 8064 + + Fixed issue where using a :func:`_orm.column_property` construct containing + a subquery against an already-mapped column attribute would not correctly + apply ORM-compilation behaviors to the subquery, including that the "IN" + expression added for a single-table inherits expression would fail to be + included. + + .. change:: + :tags: bug, orm + :tickets: 8001 + + Fixed issue where ORM results would apply incorrect key names to the + returned :class:`.Row` objects in the case where the set of columns to be + selected were changed, such as when using + :meth:`.Select.with_only_columns`. + + .. change:: + :tags: bug, mysql + :tickets: 7966 + + Further adjustments to the MySQL PyODBC dialect to allow for complete + connectivity, which was previously still not working despite fixes in + :ticket:`7871`. + + .. change:: + :tags: bug, sql + :tickets: 7979 + + Fixed an issue where using :func:`.bindparam` with no explicit data or type + given could be coerced into the incorrect type when used in expressions + such as when using :meth:`.ARRAY.Comparator.any` and + :meth:`.ARRAY.Comparator.all`. + + + .. change:: + :tags: bug, oracle + :tickets: 8053 + + Fixed SQL compiler issue where the "bind processing" function for a bound + parameter would not be correctly applied to a bound value if the bound + parameter's name were "escaped". Concretely, this applies, among other + cases, to Oracle when a :class:`.Column` has a name that itself requires + quoting, such that the quoting-required name is then used for the bound + parameters generated within DML statements, and the datatype in use + requires bind processing, such as the :class:`.Enum` datatype. + + .. change:: + :tags: bug, mssql, reflection + :tickets: 8035 + + Explicitly specify the collation when reflecting table columns using + MSSQL to prevent "collation conflict" errors. + + .. 
change:: + :tags: bug, orm, oracle, postgresql + :tickets: 8056 + + Fixed bug, likely a regression from 1.3, where usage of column names that + require bound parameter escaping, more concretely when using Oracle with + column names that require quoting such as those that start with an + underscore, or in less common cases with some PostgreSQL drivers when using + column names that contain percent signs, would cause the ORM versioning + feature to not work correctly if the versioning column itself had such a + name, as the ORM assumes certain bound parameter naming conventions that + were being interfered with via the quotes. This issue is related to + :ticket:`8053` and essentially revises the approach towards fixing this, + revising the original issue :ticket:`5653` that created the initial + implementation for generalized bound-parameter name quoting. + + .. change:: + :tags: bug, mysql + :tickets: 8036 + + Added disconnect code for MySQL error 4031, introduced in MySQL >= 8.0.24, + indicating connection idle timeout exceeded. In particular this repairs an + issue where pre-ping could not reconnect on a timed-out connection. Pull + request courtesy valievkarim. + + .. change:: + :tags: bug, sql + :tickets: 8018 + + An informative error is raised if two individual :class:`.BindParameter` + objects share the same name, yet one is used within an "expanding" context + (typically an IN expression) and the other is not; mixing the same name in + these two different styles of usage is not supported and typically the + ``expanding=True`` parameter should be set on the parameters that are to + receive list values outside of IN expressions (where ``expanding`` is set + by default). + + .. change:: + :tags: bug, engine, tests + :tickets: 8019 + + Fixed issue where support for logging "stacklevel" implemented in + :ticket:`7612` required adjustment to work with recently released Python + 3.11.0b1, also repairs the unit tests which tested this feature. + + .. 
change:: + :tags: usecase, oracle + :tickets: 8066 + + Added two new error codes for Oracle disconnect handling to support early + testing of the new "python-oracledb" driver released by Oracle. .. changelog:: :version: 1.4.36 diff --git a/doc/build/changelog/unreleased_14/7966.rst b/doc/build/changelog/unreleased_14/7966.rst deleted file mode 100644 index b07baec4532..00000000000 --- a/doc/build/changelog/unreleased_14/7966.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: bug, mysql - :tickets: 7966 - - Further adjustments to the MySQL PyODBC dialect to allow for complete - connectivity, which was previously still not working despite fixes in - :ticket:`7871`. diff --git a/doc/build/changelog/unreleased_14/7979.rst b/doc/build/changelog/unreleased_14/7979.rst deleted file mode 100644 index e4aec5c403b..00000000000 --- a/doc/build/changelog/unreleased_14/7979.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 7979 - - Fixed an issue where using :func:`.bindparam` with no explicit data or type - given could be coerced into the incorrect type when used in expressions - such as when using :meth:`.ARRAY.Comparator.any` and - :meth:`.ARRAY.Comparator.all`. - diff --git a/doc/build/changelog/unreleased_14/8001.rst b/doc/build/changelog/unreleased_14/8001.rst deleted file mode 100644 index aa8251445a4..00000000000 --- a/doc/build/changelog/unreleased_14/8001.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 8001 - - Fixed issue where ORM results would apply incorrect key names to the - returned :class:`.Row` objects in the case where the set of columns to be - selected were changed, such as when using - :meth:`.Select.with_only_columns`. diff --git a/doc/build/changelog/unreleased_14/8014.rst b/doc/build/changelog/unreleased_14/8014.rst deleted file mode 100644 index 168100bb224..00000000000 --- a/doc/build/changelog/unreleased_14/8014.rst +++ /dev/null @@ -1,12 +0,0 @@ -.. 
change:: - :tags: bug, sql, postgresql, sqlite - :tickets: 8014 - - Fixed bug where the PostgreSQL - :meth:`_postgresql.Insert.on_conflict_do_update` method and the SQLite - :meth:`_sqlite.Insert.on_conflict_do_update` method would both fail to - correctly accommodate a column with a separate ".key" when specifying the - column using its key name in the dictionary passed to - :paramref:`_postgresql.Insert.on_conflict_do_update.set_`, as well as if - the :attr:`_postgresql.Insert.excluded` collection were used as the - dictionary directly. diff --git a/doc/build/changelog/unreleased_14/8018.rst b/doc/build/changelog/unreleased_14/8018.rst deleted file mode 100644 index c4aae3eeac1..00000000000 --- a/doc/build/changelog/unreleased_14/8018.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 8018 - - An informative error is raised if two individual :class:`.BindParameter` - objects share the same name, yet one is used within an "expanding" context - (typically an IN expression) and the other is not; mixing the same name in - these two different styles of usage is not supported and typically the - ``expanding=True`` parameter should be set on the parameters that are to - receive list values outside of IN expressions (where ``expanding`` is set - by default). diff --git a/doc/build/changelog/unreleased_14/8019.rst b/doc/build/changelog/unreleased_14/8019.rst deleted file mode 100644 index 854703bceaa..00000000000 --- a/doc/build/changelog/unreleased_14/8019.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: bug, engine, tests - :tickets: 8019 - - Fixed issue where support for logging "stacklevel" implemented in - :ticket:`7612` required adjustment to work with recently released Python - 3.11.0b1, also repairs the unit tests which tested this feature. 
diff --git a/doc/build/changelog/unreleased_14/8035.rst b/doc/build/changelog/unreleased_14/8035.rst deleted file mode 100644 index ea6ece0556e..00000000000 --- a/doc/build/changelog/unreleased_14/8035.rst +++ /dev/null @@ -1,6 +0,0 @@ -.. change:: - :tags: bug, mssql, reflection - :tickets: 8035 - - Explicitly specify the collation when reflecting table columns using - MSSQL to prevent "collation conflict" errors. diff --git a/doc/build/changelog/unreleased_14/8036.rst b/doc/build/changelog/unreleased_14/8036.rst deleted file mode 100644 index 52b956b6b4a..00000000000 --- a/doc/build/changelog/unreleased_14/8036.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, mysql - :tickets: 8036 - - Added disconnect code for MySQL error 4031, introduced in MySQL >= 8.0.24, - indicating connection idle timeout exceeded. In particular this repairs an - issue where pre-ping could not reconnect on a timed-out connection. Pull - request courtesy valievkarim. diff --git a/doc/build/changelog/unreleased_14/8053.rst b/doc/build/changelog/unreleased_14/8053.rst deleted file mode 100644 index 316b6385941..00000000000 --- a/doc/build/changelog/unreleased_14/8053.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. change:: - :tags: bug, oracle - :tickets: 8053 - - Fixed SQL compiler issue where the "bind processing" function for a bound - parameter would not be correctly applied to a bound value if the bound - parameter's name were "escaped". Concretely, this applies, among other - cases, to Oracle when a :class:`.Column` has a name that itself requires - quoting, such that the quoting-required name is then used for the bound - parameters generated within DML statements, and the datatype in use - requires bind processing, such as the :class:`.Enum` datatype. 
diff --git a/doc/build/changelog/unreleased_14/8056.rst b/doc/build/changelog/unreleased_14/8056.rst deleted file mode 100644 index a5a61fa3211..00000000000 --- a/doc/build/changelog/unreleased_14/8056.rst +++ /dev/null @@ -1,15 +0,0 @@ -.. change:: - :tags: bug, orm, oracle, postgresql - :tickets: 8056 - - Fixed bug, likely a regression from 1.3, where usage of column names that - require bound parameter escaping, more concretely when using Oracle with - column names that require quoting such as those that start with an - underscore, or in less common cases with some PostgreSQL drivers when using - column names that contain percent signs, would cause the ORM versioning - feature to not work correctly if the versioning column itself had such a - name, as the ORM assumes certain bound parameter naming conventions that - were being interfered with via the quotes. This issue is related to - :ticket:`8053` and essentially revises the approach towards fixing this, - revising the original issue :ticket:`5653` that created the initial - implementation for generalized bound-parameter name quoting. diff --git a/doc/build/changelog/unreleased_14/8062.rst b/doc/build/changelog/unreleased_14/8062.rst deleted file mode 100644 index ada473de9ca..00000000000 --- a/doc/build/changelog/unreleased_14/8062.rst +++ /dev/null @@ -1,5 +0,0 @@ -.. change:: - :tags: bug, mssql - :tickets: 8062 - - Fix issue where a password with a leading "{" would result in login failure. diff --git a/doc/build/changelog/unreleased_14/8064.rst b/doc/build/changelog/unreleased_14/8064.rst deleted file mode 100644 index ccac2ad03db..00000000000 --- a/doc/build/changelog/unreleased_14/8064.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. 
change:: - :tags: bug, orm - :tickets: 8064 - - Fixed issue where using a :func:`_orm.column_property` construct containing - a subquery against an already-mapped column attribute would not correctly - apply ORM-compilation behaviors to the subquery, including that the "IN" - expression added for a single-table inherits expression would fail to be - included. diff --git a/doc/build/changelog/unreleased_14/8066.rst b/doc/build/changelog/unreleased_14/8066.rst deleted file mode 100644 index 5f814ab3ab1..00000000000 --- a/doc/build/changelog/unreleased_14/8066.rst +++ /dev/null @@ -1,6 +0,0 @@ -.. change:: - :tags: usecase, oracle - :tickets: 8066 - - Added two new error codes for Oracle disconnect handling to support early - testing of the new "python-oracledb" driver released by Oracle. diff --git a/doc/build/changelog/unreleased_14/8073.rst b/doc/build/changelog/unreleased_14/8073.rst deleted file mode 100644 index 57add15b81b..00000000000 --- a/doc/build/changelog/unreleased_14/8073.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 8073 - - An informative error is raised for the use case where - :meth:`.Insert.from_select` is being passed a "compound select" object such - as a UNION, yet the INSERT statement needs to append additional columns to - support Python-side or explicit SQL defaults from the table metadata. In - this case a subquery of the compound object should be passed. diff --git a/doc/build/conf.py b/doc/build/conf.py index 8567fed0602..7e32d4acea0 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -213,9 +213,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. 
-release = "1.4.36" +release = "1.4.37" -release_date = "April 26, 2022" +release_date = "May 31, 2022" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From 9edac949b4436792016fbcfa718a26169e27be76 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 31 May 2022 17:28:09 -0400 Subject: [PATCH 256/632] Version 1.4.38 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 1c043fef4e9..61e2fad816b 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.38 + :include_notes_from: unreleased_14 + .. changelog:: :version: 1.4.37 :released: May 31, 2022 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index 0ed1fef0715..91a4b0767d4 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.37" +__version__ = "1.4.38" def __go(lcls): From c1b82a9b4f3157ada1b6077679ec393ebe173120 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 1 Jun 2022 12:11:19 -0400 Subject: [PATCH 257/632] propagate proxy_key from WrapsColumnExpression this allows cast() of a label() to propagate the proxy key outwards in the same way that it apparently works at the SQL level. This is stuffing even more rules into naming so basically seeing how far we can go without other cases starting to fail. 
Fixes: #8084 Change-Id: I20bd97dae798fee6492334c06934e807d0f269ef (cherry picked from commit 14250f2668151f1c4df86dbf962c771e9788111e) --- doc/build/changelog/unreleased_14/8084.rst | 10 ++++++++ lib/sqlalchemy/sql/elements.py | 8 +++++++ test/sql/test_compiler.py | 3 ++- test/sql/test_labels.py | 28 ++++++++++++++++++++++ 4 files changed, 48 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/8084.rst diff --git a/doc/build/changelog/unreleased_14/8084.rst b/doc/build/changelog/unreleased_14/8084.rst new file mode 100644 index 00000000000..43095e8c938 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8084.rst @@ -0,0 +1,10 @@ +.. change:: + :tags: bug, sql + :tickets: 8084 + + Enhanced the mechanism of :class:`.Cast` and other "wrapping" + column constructs to more fully preserve a wrapped :class:`.Label` + construct, including that the label name will be preserved in the + ``.c`` collection of a :class:`.Subquery`. The label was already + able to render in the SQL correctly on the outside of the construct + which it was wrapped inside. diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index da9c5f6b569..0462b26482f 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -1201,6 +1201,14 @@ def _dedupe_anon_label_idx(self, idx): else: return self._dedupe_anon_tq_label_idx(idx) + @property + def _proxy_key(self): + wce = self.wrapped_column_expression + + if not wce._is_text_clause: + return wce._proxy_key + return super(WrapsColumnExpression, self)._proxy_key + class BindParameter(roles.InElementRole, ColumnElement): r"""Represent a "bound expression". 
diff --git a/test/sql/test_compiler.py b/test/sql/test_compiler.py index 250e8b30cf9..4db5f3df9d2 100644 --- a/test/sql/test_compiler.py +++ b/test/sql/test_compiler.py @@ -3208,7 +3208,7 @@ def test_naming(self): (exprs[1], "hoho", "hoho(mytable.myid)", "hoho_1"), ( exprs[2], - "_no_label", + "name", "CAST(mytable.name AS NUMERIC)", "name", # due to [ticket:4449] ), @@ -3232,6 +3232,7 @@ def test_naming(self): t = table1 s1 = select(col).select_from(t) + eq_(col._proxy_key, key if key != "_no_label" else None) eq_(list(s1.subquery().c.keys()), [key]) if lbl: diff --git a/test/sql/test_labels.py b/test/sql/test_labels.py index 8c8e9dbeda3..d385b9e8d14 100644 --- a/test/sql/test_labels.py +++ b/test/sql/test_labels.py @@ -26,6 +26,7 @@ from sqlalchemy.testing import engines from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures +from sqlalchemy.testing import is_ from sqlalchemy.testing import mock from sqlalchemy.testing.schema import Column from sqlalchemy.testing.schema import Table @@ -938,6 +939,33 @@ def test_type_coerce_auto_label_label_style_none(self): "some_table.name FROM some_table", ) + @testing.combinations("inside", "outside") + def test_wraps_col_expr_label_propagate(self, cast_location): + """test #8084""" + + table1 = self.table1 + + if cast_location == "inside": + expr = cast(table1.c.name, Integer).label("foo") + elif cast_location == "outside": + expr = cast(table1.c.name.label("foo"), Integer) + else: + assert False + + self.assert_compile( + select(expr), + "SELECT CAST(some_table.name AS INTEGER) AS foo FROM some_table", + ) + is_(select(expr).selected_columns.foo, expr) + + subq = select(expr).subquery() + self.assert_compile( + select(subq).where(subq.c.foo == 10), + "SELECT anon_1.foo FROM (SELECT CAST(some_table.name AS INTEGER) " + "AS foo FROM some_table) AS anon_1 WHERE anon_1.foo = :foo_1", + checkparams={"foo_1": 10}, + ) + def test_type_coerce_auto_label_label_style_disambiguate(self): table1 = self.table1 From 
b53aafbdb615aa90ff734aafcfe2d319c72dcd80 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 2 Jun 2022 14:52:27 -0400 Subject: [PATCH 258/632] emphasize expire_on_commit in detached error docs issues like #8082 suggest users are still not fully aware of the need to set this parameter when dealing with detached objects. Change-Id: I6f389fdbe18b9c977bfb8188fc4732dbd56884d9 (cherry picked from commit ad86d32f7fbd1c6deda8ff3bebe0595c0f2986cc) --- doc/build/errors.rst | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/doc/build/errors.rst b/doc/build/errors.rst index 376bfaf4344..7c4e3e4d4f8 100644 --- a/doc/build/errors.rst +++ b/doc/build/errors.rst @@ -1132,9 +1132,9 @@ method. The objects will then live on to be accessed further, very often within web applications where they are delivered to a server-side templating engine and are asked for further attributes which they cannot load. -Mitigation of this error is via two general techniques: +Mitigation of this error is via these techniques: -* **Don't close the session prematurely** - Often, applications will close +* **Try not to have detached objects; don't close the session prematurely** - Often, applications will close out a transaction before passing off related objects to some other system which then fails due to this error. Sometimes the transaction doesn't need to be closed so soon; an example is the web application closes out @@ -1146,20 +1146,26 @@ Mitigation of this error is via two general techniques: :class:`.Session` can be held open until the lifespan of the objects are done, this is the best approach. -* **Load everything that's needed up front** - It is very often impossible to +* **Otherwise, load everything that's needed up front** - It is very often impossible to keep the transaction open, especially in more complex applications that need to pass objects off to other systems that can't run in the same context even though they're in the same process. 
In this case, the application - should try to make appropriate use of :term:`eager loading` to ensure + should prepare to deal with :term:`detached` objects, + and should try to make appropriate use of :term:`eager loading` to ensure that objects have what they need up front. - When using this approach, it is usually necessary that the - :paramref:`_orm.Session.expire_on_commit` parameter be set to ``False``, so - that after a :meth:`_orm.Session.commit` operation, the objects within the - session aren't :term:`expired`, which would incur a lazy load if their - attributes were subsequently accessed. Additionally, the - :meth:`_orm.Session.rollback` method unconditionally expires all contents in - the :class:`_orm.Session` and should also be avoided in non-error scenarios. +* **And importantly, set expire_on_commit to False** - When using detached objects, the + most common reason objects need to re-load data is because they were expired + from the last call to :meth:`_orm.Session.commit`. This expiration should + not be used when dealing with detached objects; so the + :paramref:`_orm.Session.expire_on_commit` parameter be set to ``False``. + By preventing the objects from becoming expired outside of the transaction, + the data which was loaded will remain present and will not incur additional + lazy loads when that data is accessed. + + Note also that :meth:`_orm.Session.rollback` method unconditionally expires + all contents in the :class:`_orm.Session` and should also be avoided in + non-error scenarios. .. seealso:: From 4bde4baf27d95f3093f5b078d254f392d2c357fd Mon Sep 17 00:00:00 2001 From: Galen Rice Date: Sat, 4 Jun 2022 06:07:43 -0400 Subject: [PATCH 259/632] docs: spelling error (#8088) Leave off the last S for savings! 
(cherry picked from commit 424e9e621205c1186e8f9f5009fb35babec31c0e) --- doc/build/orm/session_transaction.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/orm/session_transaction.rst b/doc/build/orm/session_transaction.rst index 6d7c4dd18ed..ce370f2f0eb 100644 --- a/doc/build/orm/session_transaction.rst +++ b/doc/build/orm/session_transaction.rst @@ -106,7 +106,7 @@ first:: Similarly, the :class:`_orm.sessionmaker` can be used in the same way:: - Session = sesssionmaker(engine) + Session = sessionmaker(engine) with Session() as session: with session.begin(): From 7f5e599b5759d6b072dddc535f3d151e0df79b0a Mon Sep 17 00:00:00 2001 From: clach04 Date: Sat, 4 Jun 2022 03:15:17 -0700 Subject: [PATCH 260/632] Link to dialect for Actian Avalanche, Vector, Actian X, and Ingres (#8039) (cherry picked from commit 7f20d506937a92019d2c44fd7804ed0f17d6a482) --- doc/build/dialects/index.rst | 105 ++++++++++++++++++----------------- 1 file changed, 54 insertions(+), 51 deletions(-) diff --git a/doc/build/dialects/index.rst b/doc/build/dialects/index.rst index d632026dc32..f08f5197911 100644 --- a/doc/build/dialects/index.rst +++ b/doc/build/dialects/index.rst @@ -76,60 +76,63 @@ External Dialects Currently maintained external dialect projects for SQLAlchemy include: -+-----------------------------------------+---------------------------------------+ -| Database | Dialect | -+=========================================+=======================================+ -| Amazon Redshift (via psycopg2) | sqlalchemy-redshift_ | -+-----------------------------------------+---------------------------------------+ -| Apache Drill | sqlalchemy-drill_ | -+-----------------------------------------+---------------------------------------+ -| Apache Druid | pydruid_ | -+-----------------------------------------+---------------------------------------+ -| Apache Hive and Presto | PyHive_ | 
-+-----------------------------------------+---------------------------------------+ -| Apache Solr | sqlalchemy-solr_ | -+-----------------------------------------+---------------------------------------+ -| CockroachDB | sqlalchemy-cockroachdb_ | -+-----------------------------------------+---------------------------------------+ -| CrateDB [1]_ | crate-python_ | -+-----------------------------------------+---------------------------------------+ -| EXASolution | sqlalchemy_exasol_ | -+-----------------------------------------+---------------------------------------+ -| Elasticsearch (readonly) | elasticsearch-dbapi_ | -+-----------------------------------------+---------------------------------------+ -| Firebird | sqlalchemy-firebird_ | -+-----------------------------------------+---------------------------------------+ -| Firebolt | firebolt-sqlalchemy_ | -+-----------------------------------------+---------------------------------------+ -| Google BigQuery | pybigquery_ | -+-----------------------------------------+---------------------------------------+ -| Google Sheets | gsheets_ | -+-----------------------------------------+---------------------------------------+ -| IBM DB2 and Informix | ibm-db-sa_ | -+-----------------------------------------+---------------------------------------+ -| IBM Netezza Performance Server [1]_ | nzalchemy_ | -+-----------------------------------------+---------------------------------------+ -| Microsoft Access (via pyodbc) | sqlalchemy-access_ | -+-----------------------------------------+---------------------------------------+ -| Microsoft SQL Server (via python-tds) | sqlalchemy-tds_ | -+-----------------------------------------+---------------------------------------+ -| Microsoft SQL Server (via turbodbc) | sqlalchemy-turbodbc_ | -+-----------------------------------------+---------------------------------------+ -| MonetDB [1]_ | sqlalchemy-monetdb_ | 
-+-----------------------------------------+---------------------------------------+ -| SAP ASE (fork of former Sybase dialect) | sqlalchemy-sybase_ | -+-----------------------------------------+---------------------------------------+ -| SAP Hana [1]_ | sqlalchemy-hana_ | -+-----------------------------------------+---------------------------------------+ -| SAP Sybase SQL Anywhere | sqlalchemy-sqlany_ | -+-----------------------------------------+---------------------------------------+ -| Snowflake | snowflake-sqlalchemy_ | -+-----------------------------------------+---------------------------------------+ -| Teradata Vantage | teradatasqlalchemy_ | -+-----------------------------------------+---------------------------------------+ ++------------------------------------------------+---------------------------------------+ +| Database | Dialect | ++================================================+=======================================+ +| Actian Avalanche, Vector, Actian X, and Ingres | sqlalchemy-ingres_ | ++------------------------------------------------+---------------------------------------+ +| Amazon Redshift (via psycopg2) | sqlalchemy-redshift_ | ++------------------------------------------------+---------------------------------------+ +| Apache Drill | sqlalchemy-drill_ | ++------------------------------------------------+---------------------------------------+ +| Apache Druid | pydruid_ | ++------------------------------------------------+---------------------------------------+ +| Apache Hive and Presto | PyHive_ | ++------------------------------------------------+---------------------------------------+ +| Apache Solr | sqlalchemy-solr_ | ++------------------------------------------------+---------------------------------------+ +| CockroachDB | sqlalchemy-cockroachdb_ | ++------------------------------------------------+---------------------------------------+ +| CrateDB [1]_ | crate-python_ | 
++------------------------------------------------+---------------------------------------+ +| EXASolution | sqlalchemy_exasol_ | ++------------------------------------------------+---------------------------------------+ +| Elasticsearch (readonly) | elasticsearch-dbapi_ | ++------------------------------------------------+---------------------------------------+ +| Firebird | sqlalchemy-firebird_ | ++------------------------------------------------+---------------------------------------+ +| Firebolt | firebolt-sqlalchemy_ | ++------------------------------------------------+---------------------------------------+ +| Google BigQuery | pybigquery_ | ++------------------------------------------------+---------------------------------------+ +| Google Sheets | gsheets_ | ++------------------------------------------------+---------------------------------------+ +| IBM DB2 and Informix | ibm-db-sa_ | ++------------------------------------------------+---------------------------------------+ +| IBM Netezza Performance Server [1]_ | nzalchemy_ | ++------------------------------------------------+---------------------------------------+ +| Microsoft Access (via pyodbc) | sqlalchemy-access_ | ++------------------------------------------------+---------------------------------------+ +| Microsoft SQL Server (via python-tds) | sqlalchemy-tds_ | ++------------------------------------------------+---------------------------------------+ +| Microsoft SQL Server (via turbodbc) | sqlalchemy-turbodbc_ | ++------------------------------------------------+---------------------------------------+ +| MonetDB [1]_ | sqlalchemy-monetdb_ | ++------------------------------------------------+---------------------------------------+ +| SAP ASE (fork of former Sybase dialect) | sqlalchemy-sybase_ | ++------------------------------------------------+---------------------------------------+ +| SAP Hana [1]_ | sqlalchemy-hana_ | 
++------------------------------------------------+---------------------------------------+ +| SAP Sybase SQL Anywhere | sqlalchemy-sqlany_ | ++------------------------------------------------+---------------------------------------+ +| Snowflake | snowflake-sqlalchemy_ | ++------------------------------------------------+---------------------------------------+ +| Teradata Vantage | teradatasqlalchemy_ | ++------------------------------------------------+---------------------------------------+ .. [1] Supports version 1.3.x only at the moment. +.. _sqlalchemy-ingres: https://github.com/clach04/ingres_sa_dialect .. _nzalchemy: https://pypi.org/project/nzalchemy/ .. _ibm-db-sa: https://pypi.org/project/ibm-db-sa/ .. _PyHive: https://github.com/dropbox/PyHive#sqlalchemy From 31e98d6dc3a690c2094e39b0729d0a1f760fabe4 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Fri, 3 Jun 2022 12:13:10 +0200 Subject: [PATCH 261/632] Fixed orm not applying fetch Fixed an issue where :meth:`_sql.GenerativeSelect.fetch` would be ignored when executing a statement using the ORM. Fixes: #8091 Change-Id: I6790c7272a71278e90de2529c8bc8ae89e54e288 (cherry picked from commit 526e9bb6ae025d3b8032d6efc1deb1a0f4a3dae3) --- doc/build/changelog/unreleased_14/8091.rst | 6 +++++ lib/sqlalchemy/dialects/sqlite/aiosqlite.py | 1 - lib/sqlalchemy/orm/context.py | 8 ++++++ test/orm/test_core_compilation.py | 30 +++++++++++++++++++++ 4 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/8091.rst diff --git a/doc/build/changelog/unreleased_14/8091.rst b/doc/build/changelog/unreleased_14/8091.rst new file mode 100644 index 00000000000..014f66a56a1 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8091.rst @@ -0,0 +1,6 @@ +.. change:: + :tags: bug, orm, sql + :tickets: 8091 + + Fixed an issue where :meth:`_sql.GenerativeSelect.fetch` would not + be applied when executing a statement using the ORM. 
diff --git a/lib/sqlalchemy/dialects/sqlite/aiosqlite.py b/lib/sqlalchemy/dialects/sqlite/aiosqlite.py index 8e621c8e2e5..9fc6d355ca8 100644 --- a/lib/sqlalchemy/dialects/sqlite/aiosqlite.py +++ b/lib/sqlalchemy/dialects/sqlite/aiosqlite.py @@ -210,7 +210,6 @@ def commit(self): self._handle_exception(error) def close(self): - # print(">close", self) try: self.await_(self._connection.close()) except Exception as error: diff --git a/lib/sqlalchemy/orm/context.py b/lib/sqlalchemy/orm/context.py index 2e3066db937..ab1fc4045b8 100644 --- a/lib/sqlalchemy/orm/context.py +++ b/lib/sqlalchemy/orm/context.py @@ -1244,6 +1244,8 @@ def _select_statement( correlate_except, limit_clause, offset_clause, + fetch_clause, + fetch_clause_options, distinct, distinct_on, prefixes, @@ -1276,6 +1278,8 @@ def _select_statement( statement._limit_clause = limit_clause statement._offset_clause = offset_clause + statement._fetch_clause = fetch_clause + statement._fetch_clause_options = fetch_clause_options if prefixes: statement._prefixes = prefixes @@ -2190,6 +2194,10 @@ def _select_args(self): "prefixes": self.select_statement._prefixes, "suffixes": self.select_statement._suffixes, "group_by": self.group_by or None, + "fetch_clause": self.select_statement._fetch_clause, + "fetch_clause_options": ( + self.select_statement._fetch_clause_options + ), } @property diff --git a/test/orm/test_core_compilation.py b/test/orm/test_core_compilation.py index 0ebc9f6504b..1457f873c5f 100644 --- a/test/orm/test_core_compilation.py +++ b/test/orm/test_core_compilation.py @@ -271,6 +271,36 @@ def test_dml_descriptions( eq_(stmt.entity_description, expected_entity) eq_(stmt.returning_column_descriptions, expected_returning) + def test_limit_offset_select(self): + User = self.classes.User + + stmt = select(User.id).limit(5).offset(6) + self.assert_compile( + stmt, + "SELECT users.id FROM users LIMIT :param_1 OFFSET :param_2", + checkparams={"param_1": 5, "param_2": 6}, + ) + + @testing.combinations( + 
(None, "ROWS ONLY"), + ({"percent": True}, "PERCENT ROWS ONLY"), + ({"percent": True, "with_ties": True}, "PERCENT ROWS WITH TIES"), + ) + def test_fetch_offset_select(self, options, fetch_clause): + User = self.classes.User + + if options is None: + stmt = select(User.id).fetch(5).offset(6) + else: + stmt = select(User.id).fetch(5, **options).offset(6) + + self.assert_compile( + stmt, + "SELECT users.id FROM users OFFSET :param_1 " + "ROWS FETCH FIRST :param_2 %s" % (fetch_clause,), + checkparams={"param_1": 6, "param_2": 5}, + ) + class ColumnsClauseFromsTest(QueryTest, AssertsCompiledSQL): __dialect__ = "default" From 1936a9590bd7c06a917909c969858255012507ca Mon Sep 17 00:00:00 2001 From: Justin Crown Date: Sat, 4 Jun 2022 16:10:38 -0400 Subject: [PATCH 262/632] Docs Update - Add **kwargs to CaseInsensitiveComparator docs (#8063) * Add **kwargs to CaseInsensitiveComparator docs * add kwargs to other operate examples Change-Id: I70a1e68bca27c2355ad3b7c5bbc538027f112bd9 * missed one entry Change-Id: Ieb4a18ab6d96e588e9ec7672cfa65fe2fd8301e5 Co-authored-by: Federico Caselli (cherry picked from commit 1508aed47261fe17180aa12fb312aebb0dd3c615) --- lib/sqlalchemy/ext/hybrid.py | 24 +++++++++++++++--------- lib/sqlalchemy/sql/operators.py | 4 ++-- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/lib/sqlalchemy/ext/hybrid.py b/lib/sqlalchemy/ext/hybrid.py index 8a4a7234681..cc0aca6ca32 100644 --- a/lib/sqlalchemy/ext/hybrid.py +++ b/lib/sqlalchemy/ext/hybrid.py @@ -484,8 +484,12 @@ def word_insensitive(cls): ``lt``, ``gt``, etc.) using :meth:`.Operators.operate`:: class CaseInsensitiveComparator(Comparator): - def operate(self, op, other): - return op(func.lower(self.__clause_element__()), func.lower(other)) + def operate(self, op, other, **kwargs): + return op( + func.lower(self.__clause_element__()), + func.lower(other), + **kwargs, + ) .. 
_hybrid_reuse_subclass: @@ -575,10 +579,10 @@ def __init__(self, word): else: self.word = func.lower(word) - def operate(self, op, other): + def operate(self, op, other, **kwargs): if not isinstance(other, CaseInsensitiveWord): other = CaseInsensitiveWord(other) - return op(self.word, other.word) + return op(self.word, other.word, **kwargs) def __clause_element__(self): return self.word @@ -706,12 +710,14 @@ def grandparent(self): from sqlalchemy.ext.hybrid import Comparator class GrandparentTransformer(Comparator): - def operate(self, op, other): + def operate(self, op, other, **kwargs): def transform(q): cls = self.__clause_element__() parent_alias = aliased(cls) - return q.join(parent_alias, cls.parent).\ - filter(op(parent_alias.parent, other)) + return q.join(parent_alias, cls.parent).filter( + op(parent_alias.parent, other, **kwargs) + ) + return transform Base = declarative_base() @@ -783,8 +789,8 @@ def go(q): return q.join(self.parent_alias, Node.parent) return go - def operate(self, op, other): - return op(self.parent_alias.parent, other) + def operate(self, op, other, **kwargs): + return op(self.parent_alias.parent, other, **kwargs) .. sourcecode:: pycon+sql diff --git a/lib/sqlalchemy/sql/operators.py b/lib/sqlalchemy/sql/operators.py index 4ab0c4f29ea..826b3129384 100644 --- a/lib/sqlalchemy/sql/operators.py +++ b/lib/sqlalchemy/sql/operators.py @@ -217,8 +217,8 @@ def operate(self, op, *other, **kwargs): side:: class MyComparator(ColumnOperators): - def operate(self, op, other): - return op(func.lower(self), func.lower(other)) + def operate(self, op, other, **kwargs): + return op(func.lower(self), func.lower(other), **kwargs) :param op: Operator callable. :param \*other: the 'other' side of the operation. 
Will From 7eba007385a75aa0f29a1466cfbb335aa6abbe8b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 4 Jun 2022 15:53:34 -0400 Subject: [PATCH 263/632] migrate labels to new tutorial other org changes and some sections from old tutorial ported to new tutorial. Change-Id: Ic0fba60ec82fff481890887beef9ed0fa271875a (Cherry-picked and independently modified) --- doc/build/changelog/changelog_10.rst | 2 +- doc/build/changelog/migration_11.rst | 9 +- doc/build/changelog/migration_12.rst | 2 +- doc/build/changelog/migration_20.rst | 81 +- doc/build/core/metadata.rst | 2 + doc/build/core/pooling.rst | 3 - doc/build/dialects/mysql.rst | 2 +- doc/build/dialects/postgresql.rst | 1 + doc/build/errors.rst | 1118 +++++++++-------- doc/build/faq/connections.rst | 23 +- doc/build/faq/sessions.rst | 2 +- doc/build/glossary.rst | 2 +- doc/build/orm/extensions/baked.rst | 1 + doc/build/orm/extensions/hybrid.rst | 2 +- doc/build/orm/extensions/mypy.rst | 6 +- doc/build/orm/persistence_techniques.rst | 2 +- doc/build/orm/queryguide.rst | 2 +- doc/build/orm/relationships.rst | 2 +- doc/build/orm/self_referential.rst | 2 +- doc/build/orm/session_basics.rst | 2 +- doc/build/tutorial/data_insert.rst | 1 + doc/build/tutorial/data_select.rst | 158 ++- doc/build/tutorial/data_update.rst | 2 + doc/build/tutorial/dbapi_transactions.rst | 1 + doc/build/tutorial/orm_related_objects.rst | 4 + lib/sqlalchemy/dialects/mysql/base.py | 2 +- lib/sqlalchemy/dialects/mysql/dml.py | 2 +- .../dialects/postgresql/psycopg2.py | 4 +- lib/sqlalchemy/engine/row.py | 2 +- lib/sqlalchemy/engine/url.py | 8 +- lib/sqlalchemy/orm/query.py | 5 +- lib/sqlalchemy/orm/relationships.py | 10 +- lib/sqlalchemy/orm/util.py | 2 - lib/sqlalchemy/sql/dml.py | 30 +- lib/sqlalchemy/sql/elements.py | 28 +- lib/sqlalchemy/sql/functions.py | 4 +- lib/sqlalchemy/sql/selectable.py | 32 +- 37 files changed, 866 insertions(+), 695 deletions(-) diff --git a/doc/build/changelog/changelog_10.rst b/doc/build/changelog/changelog_10.rst 
index 4d3b84d3b40..addf624de72 100644 --- a/doc/build/changelog/changelog_10.rst +++ b/doc/build/changelog/changelog_10.rst @@ -811,7 +811,7 @@ .. seealso:: - :ref:`updates_order_parameters` + :ref:`tutorial_parameter_ordered_updates` .. change:: :tags: bug, orm diff --git a/doc/build/changelog/migration_11.rst b/doc/build/changelog/migration_11.rst index a2c88ae11d2..5c1b842b61e 100644 --- a/doc/build/changelog/migration_11.rst +++ b/doc/build/changelog/migration_11.rst @@ -1213,7 +1213,7 @@ RANGE and ROWS expressions for window functions:: .. seealso:: - :ref:`window_functions` + :ref:`tutorial_window_functions` :ticket:`3049` @@ -1242,7 +1242,7 @@ selectable, e.g. lateral correlation:: .. seealso:: - :ref:`lateral_selects` + :ref:`tutorial_lateral_correlation` :class:`_expression.Lateral` @@ -1478,7 +1478,7 @@ this behavioral change for applications using it are at :ref:`behavior_change_35 .. seealso:: - :ref:`sqlexpression_text_columns` - in the Core tutorial + :ref:`tutorial_select_arbitrary_text` :ref:`behavior_change_3501` - backwards compatibility remarks @@ -2606,9 +2606,6 @@ Support for PyGreSQL The `PyGreSQL `_ DBAPI is now supported. -.. seealso:: - - :ref:`dialect-postgresql-pygresql` The "postgres" module is removed -------------------------------- diff --git a/doc/build/changelog/migration_12.rst b/doc/build/changelog/migration_12.rst index bc1d0739e9d..7073660f788 100644 --- a/doc/build/changelog/migration_12.rst +++ b/doc/build/changelog/migration_12.rst @@ -905,7 +905,7 @@ would render as:: .. 
seealso:: - :ref:`multi_table_deletes` + :ref:`tutorial_multi_table_deletes` :ticket:`959` diff --git a/doc/build/changelog/migration_20.rst b/doc/build/changelog/migration_20.rst index 86c8b1a69b3..64bead3cd1b 100644 --- a/doc/build/changelog/migration_20.rst +++ b/doc/build/changelog/migration_20.rst @@ -165,8 +165,8 @@ Given the example program below:: The above program uses several patterns that many users will already identify as "legacy", namely the use of the :meth:`_engine.Engine.execute` method -that's part of the :ref:`connectionless execution ` -system. When we run the above program against 1.4, it returns a single line:: +that's part of the "connectionless execution" API. When we run the above +program against 1.4, it returns a single line:: $ python test3.py [(1,)] @@ -2080,11 +2080,84 @@ explicit use of :meth:`_orm.Session.begin`, which is now solved by 1.4, as well as to allow the use of "subtransactions", which are also removed in 2.0. +.. _migration_20_session_subtransaction: + Session "subtransaction" behavior removed ------------------------------------------ -See the section :ref:`session_subtransactions` for background on this -change. +**Synopsis** + +The "subtransaction" pattern that was often used with autocommit mode is +also deprecated in 1.4. This pattern allowed the use of the +:meth:`_orm.Session.begin` method when a transaction was already begun, +resulting in a construct called a "subtransaction", which was essentially +a block that would prevent the :meth:`_orm.Session.commit` method from actually +committing. 
+ +**Migration to 2.0** + + +To provide backwards compatibility for applications that make use of this +pattern, the following context manager or a similar implementation based on +a decorator may be used:: + + + import contextlib + + @contextlib.contextmanager + def transaction(session): + if not session.in_transaction(): + with session.begin(): + yield + else: + yield + + +The above context manager may be used in the same way the +"subtransaction" flag works, such as in the following example:: + + + # method_a starts a transaction and calls method_b + def method_a(session): + with transaction(session): + method_b(session) + + # method_b also starts a transaction, but when + # called from method_a participates in the ongoing + # transaction. + def method_b(session): + with transaction(session): + session.add(SomeObject('bat', 'lala')) + + Session = sessionmaker(engine) + + # create a Session and call method_a + with Session() as session: + method_a(session) + +To compare with the preferred idiomatic pattern, the begin block should +be at the outermost level. This removes the need for individual functions +or methods to be concerned with the details of transaction demarcation:: + + def method_a(session): + method_b(session) + + def method_b(session): + session.add(SomeObject('bat', 'lala')) + + Session = sessionmaker(engine) + + # create a Session and call method_a + with Session() as session: + with session.begin(): + method_a(session) + +**Discussion** + +This pattern has been shown to be confusing in real world applications, and it +is preferable for an application to ensure that the top-most level of database +operations are performed with a single begin/commit pair. 
+ 2.0 Migration - ORM Extension and Recipe Changes diff --git a/doc/build/core/metadata.rst b/doc/build/core/metadata.rst index 366f165651b..5c6fa2e5cbf 100644 --- a/doc/build/core/metadata.rst +++ b/doc/build/core/metadata.rst @@ -557,6 +557,7 @@ Column, Table, MetaData API --------------------------- .. attribute:: sqlalchemy.schema.BLANK_SCHEMA + :noindex: Symbol indicating that a :class:`_schema.Table` or :class:`.Sequence` should have 'None' for its schema, even if the parent @@ -573,6 +574,7 @@ Column, Table, MetaData API .. versionadded:: 1.0.14 .. attribute:: sqlalchemy.schema.RETAIN_SCHEMA + :noindex: Symbol indicating that a :class:`_schema.Table`, :class:`.Sequence` or in some cases a :class:`_schema.ForeignKey` object, in situations diff --git a/doc/build/core/pooling.rst b/doc/build/core/pooling.rst index c6ef94a0a7a..59223ee7aaa 100644 --- a/doc/build/core/pooling.rst +++ b/doc/build/core/pooling.rst @@ -581,19 +581,16 @@ API Documentation - Available Pool Implementations .. autoclass:: sqlalchemy.pool.Pool - .. automethod:: __init__ .. automethod:: connect .. automethod:: dispose .. automethod:: recreate .. autoclass:: sqlalchemy.pool.QueuePool - .. automethod:: __init__ .. automethod:: connect .. autoclass:: SingletonThreadPool - .. automethod:: __init__ .. autoclass:: AssertionPool diff --git a/doc/build/dialects/mysql.rst b/doc/build/dialects/mysql.rst index 49dbff71bab..64a6f45f968 100644 --- a/doc/build/dialects/mysql.rst +++ b/doc/build/dialects/mysql.rst @@ -77,7 +77,7 @@ construction arguments, are as follows: .. autoclass:: DOUBLE :members: __init__ - + :noindex: .. autoclass:: ENUM :members: __init__ diff --git a/doc/build/dialects/postgresql.rst b/doc/build/dialects/postgresql.rst index 1c4b982e0a7..d30c03885d5 100644 --- a/doc/build/dialects/postgresql.rst +++ b/doc/build/dialects/postgresql.rst @@ -51,6 +51,7 @@ construction arguments, are as follows: .. autoclass:: DOUBLE_PRECISION :members: __init__ + :noindex: .. 
autoclass:: ENUM diff --git a/doc/build/errors.rst b/doc/build/errors.rst index 7c4e3e4d4f8..3c0632af692 100644 --- a/doc/build/errors.rst +++ b/doc/build/errors.rst @@ -33,440 +33,6 @@ Within this section, the goal is to try to provide background on some of the most common runtime errors as well as programming time errors. -Legacy API Features -=================== - -.. the reason we need this section here distinct from the migration notes - is because this is actually an ArgumentError that's raised by select() - when the "legacy" and "future" mode styles are used together. - -.. _error_c9ae: - -select() construct created in "legacy" mode; keyword arguments, etc. --------------------------------------------------------------------- - -The :func:`_expression.select` construct has been updated as of SQLAlchemy -1.4 to support the newer calling style that will be standard in -:ref:`SQLAlchemy 2.0 `. For backwards compatibility in the -interim, the construct accepts arguments in both the "legacy" style as well -as the "new" style. 
- -The "new" style features that column and table expressions are passed -positionally to the :func:`_expression.select` construct only; any other -modifiers to the object must be passed using subsequent method chaining:: - - # this is the way to do it going forward - stmt = select(table1.c.myid).where(table1.c.myid == table2.c.otherid) - -For comparison, a :func:`_expression.select` in legacy forms of SQLAlchemy, -before methods like :meth:`.Select.where` were even added, would like:: - - # this is how it was documented in original SQLAlchemy versions - # many years ago - stmt = select([table1.c.myid], whereclause=table1.c.myid == table2.c.otherid) - -Or even that the "whereclause" would be passed positionally:: - - # this is also how it was documented in original SQLAlchemy versions - # many years ago - stmt = select([table1.c.myid], table1.c.myid == table2.c.otherid) - -For some years now, the additional "whereclause" and other arguments that are -accepted have been removed from most narrative documentation, leading to a -calling style that is most familiar as the list of column arguments passed -as a list, but no further arguments:: - - # this is how it's been documented since around version 1.0 or so - stmt = select([table1.c.myid]).where(table1.c.myid == table2.c.otherid) - -The document at :ref:`migration_20_5284` describes this change in terms -of :ref:`2.0 Migration `. - -.. seealso:: - - :ref:`migration_20_5284` - - :ref:`migration_20_toplevel` - - - -.. _error_b8d9: - -The in SQLAlchemy 2.0 will no longer --------------------------------------------------------------------------------------------- - -SQLAlchemy 2.0 is expected to be a major shift for a wide variety of key -SQLAlchemy usage patterns in both the Core and ORM components. 
The goal -of this release is to make a slight readjustment in some of the most -fundamental assumptions of SQLAlchemy since its early beginnings, and -to deliver a newly streamlined usage model that is hoped to be significantly -more minimalist and consistent between the Core and ORM components, as well as -more capable. - -Introduced at :ref:`migration_20_toplevel`, the SQLAlchemy 2.0 project includes -a comprehensive future compatibility system that is to be integrated into the -1.4 series of SQLAlchemy, such that applications will have a clear, -unambiguous, and incremental upgrade path in order to migrate applications to -being fully 2.0 compatible. The :class:`.exc.RemovedIn20Warning` deprecation -warning is at the base of this system to provide guidance on what behaviors in -an existing codebase will need to be modified. An overview of how to enable -this warning is at :ref:`deprecation_20_mode`. - -.. seealso:: - - :ref:`migration_20_toplevel` - An overview of the upgrade process from - the 1.x series, as well as the current goals and progress of SQLAlchemy - 2.0. - - - :ref:`deprecation_20_mode` - specific guidelines on how to use - "2.0 deprecations mode" in SQLAlchemy 1.4. - -.. _error_c9bf: - -A bind was located via legacy bound metadata, but since future=True is set on this Session, this bind is ignored. -------------------------------------------------------------------------------------------------------------------- - -The concept of "bound metadata" is being removed in SQLAlchemy 2.0. This -refers to the :paramref:`_schema.MetaData.bind` parameter on the -:class:`_schema.MetaData` object that in turn allows objects like the ORM -:class:`_orm.Session` to associate a particular mapped class with an -:class:`_orm.Engine`. In SQLAlchemy 2.0, the :class:`_orm.Session` must be -linked to each :class:`_orm.Engine` directly. 
That is, instead of instantiating -the :class:`_orm.Session` or -:class:`_orm.sessionmaker` without any arguments, and associating the -:class:`_engine.Engine` with the :class:`_schema.MetaData`:: - - engine = create_engine("sqlite://") - Session = sessionmaker() - metadata_obj = MetaData(bind=engine) - Base = declarative_base(metadata=metadata_obj) - - class MyClass(Base): - # ... - - - session = Session() - session.add(MyClass()) - session.commit() - -The :class:`_engine.Engine` must instead be associated directly with the -:class:`_orm.sessionmaker` or :class:`_orm.Session`. The -:class:`_schema.MetaData` object should no longer be associated with any -engine:: - - - engine = create_engine("sqlite://") - Session = sessionmaker(engine) - Base = declarative_base() - - class MyClass(Base): - # ... - - - session = Session() - session.add(MyClass()) - session.commit() - -In SQLAlchemy 1.4, this :term:`2.0 style` behavior is enabled when the -:paramref:`_orm.Session.future` flag is set on :class:`_orm.sessionmaker` -or :class:`_orm.Session`. - -.. _error_cprf: -.. _caching_caveats: - -Object will not produce a cache key, Performance Implications --------------------------------------------------------------- - -SQLAlchemy as of version 1.4 includes a -:ref:`SQL compilation caching facility ` which will allow -Core and ORM SQL constructs to cache their stringified form, along with other -structural information used to fetch results from the statement, allowing the -relatively expensive string compilation process to be skipped when another -structurally equivalent construct is next used. This system -relies upon functionality that is implemented for all SQL constructs, including -objects such as :class:`_schema.Column`, -:func:`_sql.select`, and :class:`_types.TypeEngine` objects, to produce a -**cache key** which fully represents their state to the degree that it affects -the SQL compilation process. 
- -If the warnings in question refer to widely used objects such as -:class:`_schema.Column` objects, and are shown to be affecting the majority of -SQL constructs being emitted (using the estimation techniques described at -:ref:`sql_caching_logging`) such that caching is generally not enabled for an -application, this will negatively impact performance and can in some cases -effectively produce a **performance degradation** compared to prior SQLAlchemy -versions. The FAQ at :ref:`faq_new_caching` covers this in additional detail. - -Caching disables itself if there's any doubt -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Caching relies on being able to generate a cache key that accurately represents -the **complete structure** of a statement in a **consistent** fashion. If a particular -SQL construct (or type) does not have the appropriate directives in place which -allow it to generate a proper cache key, then caching cannot be safely enabled: - -* The cache key must represent the **complete structure**: If the usage of two - separate instances of that construct may result in different SQL being - rendered, caching the SQL against the first instance of the element using a - cache key that does not capture the distinct differences between the first and - second elements will result in incorrect SQL being cached and rendered for the - second instance. - -* The cache key must be **consistent**: If a construct represents state that - changes every time, such as a literal value, producing unique SQL for every - instance of it, this construct is also not safe to cache, as repeated use of - the construct will quickly fill up the statement cache with unique SQL strings - that will likely not be used again, defeating the purpose of the cache. - -For the above two reasons, SQLAlchemy's caching system is **extremely -conservative** about deciding to cache the SQL corresponding to an object. 
- -Assertion attributes for caching -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The warning is emitted based on the criteria below. For further detail on -each, see the section :ref:`faq_new_caching`. - -* The :class:`.Dialect` itself (i.e. the module that is specified by the - first part of the URL we pass to :func:`_sa.create_engine`, like - ``postgresql+psycopg2://``), must indicate it has been reviewed and tested - to support caching correctly, which is indicated by the - :attr:`.Dialect.supports_statement_cache` attribute being set to ``True``. - When using third party dialects, consult with the maintainers of the dialect - so that they may follow the :ref:`steps to ensure caching may be enabled - ` in their dialect and publish a new release. - -* Third party or user defined types that inherit from either - :class:`.TypeDecorator` or :class:`.UserDefinedType` must include the - :attr:`.ExternalType.cache_ok` attribute in their definition, including for - all derived subclasses, following the guidelines described in the docstring - for :attr:`.ExternalType.cache_ok`. As before, if these datatypes are - imported from third party libraries, consult with the maintainers of that - library so that they may provide the necessary changes to their library and - publish a new release. - -* Third party or user defined SQL constructs that subclass from classes such - as :class:`.ClauseElement`, :class:`_schema.Column`, :class:`_dml.Insert` - etc, including simple subclasses as well as those which are designed to - work with the :ref:`sqlalchemy.ext.compiler_toplevel`, should normally - include the :attr:`.HasCacheKey.inherit_cache` attribute set to ``True`` - or ``False`` based on the design of the construct, following the guidelines - described at :ref:`compilerext_caching`. - -.. seealso:: - - :ref:`sql_caching_logging` - background on observing cache behavior - and efficiency - - :ref:`faq_new_caching` - in the :ref:`faq_toplevel` section - -.. 
_error_s9r1: - -Object is being merged into a Session along the backref cascade ---------------------------------------------------------------- - -This message refers to the "backref cascade" behavior of SQLAlchemy, -which is described at :ref:`backref_cascade`. This refers to the action of -an object being added into a :class:`_orm.Session` as a result of another -object that's already present in that session being associated with it. -As this behavior has been shown to be more confusing than helpful, -the :paramref:`_orm.relationship.cascade_backrefs` and -:paramref:`_orm.backref.cascade_backrefs` parameters were added, which can -be set to ``False`` to disable it, and in SQLAlchemy 2.0 the "cascade backrefs" -behavior will be disabled completely. - -To set :paramref:`_orm.relationship.cascade_backrefs` to ``False`` on a -backref that is currently configured using the -:paramref:`_orm.relationship.backref` string parameter, the backref must -be declared using the :func:`_orm.backref` function first so that the -:paramref:`_orm.backref.cascade_backrefs` parameter may be passed. - -Alternatively, the entire "cascade backrefs" behavior can be turned off -across the board by using the :class:`_orm.Session` in "future" mode, -by passing ``True`` for the :paramref:`_orm.Session.future` parameter. - -.. seealso:: - - :ref:`backref_cascade` - complete description of the cascade backrefs - behavior - - :ref:`change_5150` - background on the change for SQLAlchemy 2.0. - -.. _error_xaj1: - -An alias is being generated automatically for raw clauseelement ----------------------------------------------------------------- - -.. 
versionadded:: 1.4.26 - -This deprecation warning refers to a very old and likely not well known pattern -that applies to the legacy :meth:`_orm.Query.join` method as well as the -:term:`2.0 style` :meth:`_sql.Select.join` method, where a join can be stated -in terms of a :func:`_orm.relationship` but the target is the -:class:`_schema.Table` or other Core selectable to which the class is mapped, -rather than an ORM entity such as a mapped class or :func:`_orm.aliased` -construct:: - - a1 = Address.__table__ - - q = s.query(User).\ - join(a1, User.addresses).\ - filter(Address.email_address == 'ed@foo.com').all() - - -The above pattern also allows an arbitrary selectable, such as -a Core :class:`_sql.Join` or :class:`_sql.Alias` object, -however there is no automatic adaptation of this element, meaning the -Core element would need to be referred towards directly:: - - a1 = Address.__table__.alias() - - q = s.query(User).\ - join(a1, User.addresses).\ - filter(a1.c.email_address == 'ed@foo.com').all() - -The correct way to specify a join target is always by using the mapped -class itself or an :class:`_orm.aliased` object, in the latter case using the -:meth:`_orm.PropComparator.of_type` modifier to set up an alias:: - - # normal join to relationship entity - q = s.query(User).\ - join(User.addresses).\ - filter(Address.email_address == 'ed@foo.com') - - # name Address target explicitly, not necessary but legal - q = s.query(User).\ - join(Address, User.addresses).\ - filter(Address.email_address == 'ed@foo.com') - -Join to an alias:: - - from sqlalchemy.orm import aliased - - a1 = aliased(Address) - - # of_type() form; recommended - q = s.query(User).\ - join(User.addresses.of_type(a1)).\ - filter(a1.email_address == 'ed@foo.com') - - # target, onclause form - q = s.query(User).\ - join(a1, User.addresses).\ - filter(a1.email_address == 'ed@foo.com') - - -.. 
_error_xaj2: - -An alias is being generated automatically due to overlapping tables -------------------------------------------------------------------- - -.. versionadded:: 1.4.26 - -This warning is typically generated when querying using the -:meth:`_sql.Select.join` method or the legacy :meth:`_orm.Query.join` method -with mappings that involve joined table inheritance. The issue is that when -joining between two joined inheritance models that share a common base table, a -proper SQL JOIN between the two entities cannot be formed without applying an -alias to one side or the other; SQLAlchemy applies an alias to the right side -of the join. For example given a joined inheritance mapping as:: - - class Employee(Base): - __tablename__ = 'employee' - id = Column(Integer, primary_key=True) - manager_id = Column(ForeignKey("manager.id")) - name = Column(String(50)) - type = Column(String(50)) - - reports_to = relationship("Manager", foreign_keys=manager_id) - - __mapper_args__ = { - 'polymorphic_identity':'employee', - 'polymorphic_on':type, - } - - class Manager(Employee): - __tablename__ = 'manager' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) - - __mapper_args__ = { - 'polymorphic_identity':'manager', - 'inherit_condition': id == Employee.id - } - -The above mapping includes a relationship between the ``Employee`` and -``Manager`` classes. Since both classes make use of the "employee" database -table, from a SQL perspective this is a -:ref:`self referential relationship `. If we wanted to -query from both the ``Employee`` and ``Manager`` models using a join, at the -SQL level the "employee" table needs to be included twice in the query, which -means it must be aliased. When we create such a join using the SQLAlchemy -ORM, we get SQL that looks like the following: - -.. 
sourcecode:: pycon+sql - - >>> stmt = select(Employee, Manager).join(Employee.reports_to) - >>> print(stmt) - {opensql}SELECT employee.id, employee.manager_id, employee.name, - employee.type, manager_1.id AS id_1, employee_1.id AS id_2, - employee_1.manager_id AS manager_id_1, employee_1.name AS name_1, - employee_1.type AS type_1 - FROM employee JOIN - (employee AS employee_1 JOIN manager AS manager_1 ON manager_1.id = employee_1.id) - ON manager_1.id = employee.manager_id - -Above, the SQL selects FROM the ``employee`` table, representing the -``Employee`` entity in the query. It then joins to a right-nested join of -``employee AS employee_1 JOIN manager AS manager_1``, where the ``employee`` -table is stated again, except as an anonymous alias ``employee_1``. This is the -"automatic generation of an alias" that the warning message refers towards. - -When SQLAlchemy loads ORM rows that each contain an ``Employee`` and a -``Manager`` object, the ORM must adapt rows from what above is the -``employee_1`` and ``manager_1`` table aliases into those of the un-aliased -``Manager`` class. This process is internally complex and does not accommodate -for all API features, notably when trying to use eager loading features such as -:func:`_orm.contains_eager` with more deeply nested queries than are shown -here. As the pattern is unreliable for more complex scenarios and involves -implicit decisionmaking that is difficult to anticipate and follow, -the warning is emitted and this pattern may be considered a legacy feature. The -better way to write this query is to use the same patterns that apply to any -other self-referential relationship, which is to use the :func:`_orm.aliased` -construct explicitly. 
For joined-inheritance and other join-oriented mappings, -it is usually desirable to add the use of the :paramref:`_orm.aliased.flat` -parameter, which will allow a JOIN of two or more tables to be aliased by -applying an alias to the individual tables within the join, rather than -embedding the join into a new subquery: - -.. sourcecode:: pycon+sql - - >>> from sqlalchemy.orm import aliased - >>> manager_alias = aliased(Manager, flat=True) - >>> stmt = select(Employee, manager_alias).join(Employee.reports_to.of_type(manager_alias)) - >>> print(stmt) - {opensql}SELECT employee.id, employee.manager_id, employee.name, - employee.type, manager_1.id AS id_1, employee_1.id AS id_2, - employee_1.manager_id AS manager_id_1, employee_1.name AS name_1, - employee_1.type AS type_1 - FROM employee JOIN - (employee AS employee_1 JOIN manager AS manager_1 ON manager_1.id = employee_1.id) - ON manager_1.id = employee.manager_id - -If we then wanted to use :func:`_orm.contains_eager` to populate the -``reports_to`` attribute, we refer to the alias:: - - >>> stmt =select(Employee).join( - ... Employee.reports_to.of_type(manager_alias) - ... ).options( - ... contains_eager(Employee.reports_to.of_type(manager_alias)) - ... ) - -Without using the explicit :func:`_orm.aliased` object, in some more nested -cases the :func:`_orm.contains_eager` option does not have enough context to -know where to get its data from, in the case that the ORM is "auto-aliasing" -in a very nested context. Therefore it's best not to rely on this feature -and instead keep the SQL construction as explicit as possible. Connections and Transactions ============================ @@ -636,56 +202,6 @@ method. When a connection is invalidated, any :class:`_engine.Transaction` that was in progress is now in an invalid state, and must be explicitly rolled back in order to remove it from the :class:`_engine.Connection`. -.. _error_8s2a: - -This connection is on an inactive transaction. 
Please rollback() fully before proceeding ------------------------------------------------------------------------------------------- - -This error condition was added to SQLAlchemy as of version 1.4. The error -refers to the state where a :class:`_engine.Connection` is placed into a -transaction using a method like :meth:`_engine.Connection.begin`, and then a -further "marker" transaction is created within that scope; the "marker" -transaction is then rolled back using :meth:`.Transaction.rollback` or closed -using :meth:`.Transaction.close`, however the outer transaction is still -present in an "inactive" state and must be rolled back. - -The pattern looks like:: - - engine = create_engine(...) - - connection = engine.connect() - transaction1 = connection.begin() - - # this is a "sub" or "marker" transaction, a logical nesting - # structure based on "real" transaction transaction1 - transaction2 = connection.begin() - transaction2.rollback() - - # transaction1 is still present and needs explicit rollback, - # so this will raise - connection.execute(text("select 1")) - -Above, ``transaction2`` is a "marker" transaction, which indicates a logical -nesting of transactions within an outer one; while the inner transaction -can roll back the whole transaction via its rollback() method, its commit() -method has no effect except to close the scope of the "marker" transaction -itself. The call to ``transaction2.rollback()`` has the effect of -**deactivating** transaction1 which means it is essentially rolled back -at the database level, however is still present in order to accommodate -a consistent nesting pattern of transactions. - -The correct resolution is to ensure the outer transaction is also -rolled back:: - - transaction1.rollback() - -This pattern is not commonly used in Core. Within the ORM, a similar issue can -occur which is the product of the ORM's "logical" transaction structure; this -is described in the FAQ entry at :ref:`faq_session_rollback`. 
- -The "subtransaction" pattern is to be removed in SQLAlchemy 2.0 so that this -particular programming pattern will no longer be available and this -error message will no longer occur in Core. .. _error_dbapi: @@ -780,42 +296,131 @@ cursor is not valid anymore, the transaction is out of sync, etc. This error is a :ref:`DBAPI Error ` and originates from the database driver (DBAPI), not SQLAlchemy itself. -The ``InternalError`` is sometimes raised by drivers in the context -of the database connection being dropped, or not being able to connect -to the database. For tips on how to deal with this, see the section -:ref:`pool_disconnects`. +The ``InternalError`` is sometimes raised by drivers in the context +of the database connection being dropped, or not being able to connect +to the database. For tips on how to deal with this, see the section +:ref:`pool_disconnects`. + +.. _error_f405: + +ProgrammingError +---------------- + +Exception raised for programming errors, e.g. table not found or already +exists, syntax error in the SQL statement, wrong number of parameters +specified, etc. + +This error is a :ref:`DBAPI Error ` and originates from +the database driver (DBAPI), not SQLAlchemy itself. + +The ``ProgrammingError`` is sometimes raised by drivers in the context +of the database connection being dropped, or not being able to connect +to the database. For tips on how to deal with this, see the section +:ref:`pool_disconnects`. + +.. _error_tw8g: + +NotSupportedError +------------------ + +Exception raised in case a method or database API was used which is not +supported by the database, e.g. requesting a .rollback() on a connection that +does not support transaction or has transactions turned off. + +This error is a :ref:`DBAPI Error ` and originates from +the database driver (DBAPI), not SQLAlchemy itself. + +SQL Expression Language +======================= +.. _error_cprf: +.. 
_caching_caveats: + +Object will not produce a cache key, Performance Implications +-------------------------------------------------------------- + +SQLAlchemy as of version 1.4 includes a +:ref:`SQL compilation caching facility ` which will allow +Core and ORM SQL constructs to cache their stringified form, along with other +structural information used to fetch results from the statement, allowing the +relatively expensive string compilation process to be skipped when another +structurally equivalent construct is next used. This system +relies upon functionality that is implemented for all SQL constructs, including +objects such as :class:`_schema.Column`, +:func:`_sql.select`, and :class:`_types.TypeEngine` objects, to produce a +**cache key** which fully represents their state to the degree that it affects +the SQL compilation process. + +If the warnings in question refer to widely used objects such as +:class:`_schema.Column` objects, and are shown to be affecting the majority of +SQL constructs being emitted (using the estimation techniques described at +:ref:`sql_caching_logging`) such that caching is generally not enabled for an +application, this will negatively impact performance and can in some cases +effectively produce a **performance degradation** compared to prior SQLAlchemy +versions. The FAQ at :ref:`faq_new_caching` covers this in additional detail. + +Caching disables itself if there's any doubt +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Caching relies on being able to generate a cache key that accurately represents +the **complete structure** of a statement in a **consistent** fashion. 
If a particular +SQL construct (or type) does not have the appropriate directives in place which +allow it to generate a proper cache key, then caching cannot be safely enabled: + +* The cache key must represent the **complete structure**: If the usage of two + separate instances of that construct may result in different SQL being + rendered, caching the SQL against the first instance of the element using a + cache key that does not capture the distinct differences between the first and + second elements will result in incorrect SQL being cached and rendered for the + second instance. + +* The cache key must be **consistent**: If a construct represents state that + changes every time, such as a literal value, producing unique SQL for every + instance of it, this construct is also not safe to cache, as repeated use of + the construct will quickly fill up the statement cache with unique SQL strings + that will likely not be used again, defeating the purpose of the cache. -.. _error_f405: +For the above two reasons, SQLAlchemy's caching system is **extremely +conservative** about deciding to cache the SQL corresponding to an object. -ProgrammingError ----------------- +Assertion attributes for caching +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -Exception raised for programming errors, e.g. table not found or already -exists, syntax error in the SQL statement, wrong number of parameters -specified, etc. +The warning is emitted based on the criteria below. For further detail on +each, see the section :ref:`faq_new_caching`. -This error is a :ref:`DBAPI Error ` and originates from -the database driver (DBAPI), not SQLAlchemy itself. +* The :class:`.Dialect` itself (i.e. the module that is specified by the + first part of the URL we pass to :func:`_sa.create_engine`, like + ``postgresql+psycopg2://``), must indicate it has been reviewed and tested + to support caching correctly, which is indicated by the + :attr:`.Dialect.supports_statement_cache` attribute being set to ``True``. 
+ When using third party dialects, consult with the maintainers of the dialect + so that they may follow the :ref:`steps to ensure caching may be enabled + ` in their dialect and publish a new release. -The ``ProgrammingError`` is sometimes raised by drivers in the context -of the database connection being dropped, or not being able to connect -to the database. For tips on how to deal with this, see the section -:ref:`pool_disconnects`. +* Third party or user defined types that inherit from either + :class:`.TypeDecorator` or :class:`.UserDefinedType` must include the + :attr:`.ExternalType.cache_ok` attribute in their definition, including for + all derived subclasses, following the guidelines described in the docstring + for :attr:`.ExternalType.cache_ok`. As before, if these datatypes are + imported from third party libraries, consult with the maintainers of that + library so that they may provide the necessary changes to their library and + publish a new release. -.. _error_tw8g: +* Third party or user defined SQL constructs that subclass from classes such + as :class:`.ClauseElement`, :class:`_schema.Column`, :class:`_dml.Insert` + etc, including simple subclasses as well as those which are designed to + work with the :ref:`sqlalchemy.ext.compiler_toplevel`, should normally + include the :attr:`.HasCacheKey.inherit_cache` attribute set to ``True`` + or ``False`` based on the design of the construct, following the guidelines + described at :ref:`compilerext_caching`. -NotSupportedError ------------------- +.. seealso:: -Exception raised in case a method or database API was used which is not -supported by the database, e.g. requesting a .rollback() on a connection that -does not support transaction or has transactions turned off. + :ref:`sql_caching_logging` - background on observing cache behavior + and efficiency -This error is a :ref:`DBAPI Error ` and originates from -the database driver (DBAPI), not SQLAlchemy itself. 
+ :ref:`faq_new_caching` - in the :ref:`faq_toplevel` section -SQL Expression Language -======================= .. _error_l7de: @@ -931,46 +536,6 @@ The solution is to access the :class:`_schema.Column` directly using the CheckConstraint(cprop.expression > 5), ) -.. _error_2afi: - -This Compiled object is not bound to any Engine or Connection -------------------------------------------------------------- - -This error refers to the concept of "bound metadata", described at -:ref:`dbengine_implicit`. The issue occurs when one invokes the -:meth:`.Executable.execute` method directly off of a Core expression object -that is not associated with any :class:`_engine.Engine`:: - - metadata_obj = MetaData() - table = Table('t', metadata_obj, Column('q', Integer)) - - stmt = select(table) - result = stmt.execute() # <--- raises - -What the logic is expecting is that the :class:`_schema.MetaData` object has -been **bound** to a :class:`_engine.Engine`:: - - engine = create_engine("mysql+pymysql://user:pass@host/db") - metadata_obj = MetaData(bind=engine) - -Where above, any statement that derives from a :class:`_schema.Table` which -in turn derives from that :class:`_schema.MetaData` will implicitly make use of -the given :class:`_engine.Engine` in order to invoke the statement. - -Note that the concept of bound metadata is a **legacy pattern** and in most -cases is **highly discouraged**. The best way to invoke the statement is -to pass it to the :meth:`_engine.Connection.execute` method of a :class:`_engine.Connection`:: - - with engine.connect() as conn: - result = conn.execute(stmt) - -When using the ORM, a similar facility is available via the :class:`.Session`:: - - result = session.execute(stmt) - -.. seealso:: - - :ref:`dbengine_implicit` .. _error_cd3x: @@ -1038,9 +603,7 @@ Since "b" is required, pass it as ``None`` so that the INSERT may proceed:: .. seealso:: - :ref:`coretutorial_bind_param` - - :ref:`execute_multiple` + :ref:`tutorial_sending_parameters` .. 
_error_89ve: @@ -1090,13 +653,188 @@ therefore requires that :meth:`_expression.SelectBase.subquery` is used:: subq = stmt.subquery() - new_stmt_1 = select(subq) + new_stmt_1 = select(subq) + + new_stmt_2 = select(some_table).select_from(some_table.join(subq)) + +.. seealso:: + + :ref:`change_4617` + +.. _error_xaj1: + +An alias is being generated automatically for raw clauseelement +---------------------------------------------------------------- + +.. versionadded:: 1.4.26 + +This deprecation warning refers to a very old and likely not well known pattern +that applies to the legacy :meth:`_orm.Query.join` method as well as the +:term:`2.0 style` :meth:`_sql.Select.join` method, where a join can be stated +in terms of a :func:`_orm.relationship` but the target is the +:class:`_schema.Table` or other Core selectable to which the class is mapped, +rather than an ORM entity such as a mapped class or :func:`_orm.aliased` +construct:: + + a1 = Address.__table__ + + q = s.query(User).\ + join(a1, User.addresses).\ + filter(Address.email_address == 'ed@foo.com').all() + + +The above pattern also allows an arbitrary selectable, such as +a Core :class:`_sql.Join` or :class:`_sql.Alias` object, +however there is no automatic adaptation of this element, meaning the +Core element would need to be referred towards directly:: + + a1 = Address.__table__.alias() + + q = s.query(User).\ + join(a1, User.addresses).\ + filter(a1.c.email_address == 'ed@foo.com').all() + +The correct way to specify a join target is always by using the mapped +class itself or an :class:`_orm.aliased` object, in the latter case using the +:meth:`_orm.PropComparator.of_type` modifier to set up an alias:: + + # normal join to relationship entity + q = s.query(User).\ + join(User.addresses).\ + filter(Address.email_address == 'ed@foo.com') + + # name Address target explicitly, not necessary but legal + q = s.query(User).\ + join(Address, User.addresses).\ + filter(Address.email_address == 'ed@foo.com') + 
+Join to an alias:: + + from sqlalchemy.orm import aliased + + a1 = aliased(Address) + + # of_type() form; recommended + q = s.query(User).\ + join(User.addresses.of_type(a1)).\ + filter(a1.email_address == 'ed@foo.com') + + # target, onclause form + q = s.query(User).\ + join(a1, User.addresses).\ + filter(a1.email_address == 'ed@foo.com') + + +.. _error_xaj2: + +An alias is being generated automatically due to overlapping tables +------------------------------------------------------------------- + +.. versionadded:: 1.4.26 + +This warning is typically generated when querying using the +:meth:`_sql.Select.join` method or the legacy :meth:`_orm.Query.join` method +with mappings that involve joined table inheritance. The issue is that when +joining between two joined inheritance models that share a common base table, a +proper SQL JOIN between the two entities cannot be formed without applying an +alias to one side or the other; SQLAlchemy applies an alias to the right side +of the join. For example given a joined inheritance mapping as:: + + class Employee(Base): + __tablename__ = 'employee' + id = Column(Integer, primary_key=True) + manager_id = Column(ForeignKey("manager.id")) + name = Column(String(50)) + type = Column(String(50)) + + reports_to = relationship("Manager", foreign_keys=manager_id) + + __mapper_args__ = { + 'polymorphic_identity':'employee', + 'polymorphic_on':type, + } + + class Manager(Employee): + __tablename__ = 'manager' + id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + + __mapper_args__ = { + 'polymorphic_identity':'manager', + 'inherit_condition': id == Employee.id + } + +The above mapping includes a relationship between the ``Employee`` and +``Manager`` classes. Since both classes make use of the "employee" database +table, from a SQL perspective this is a +:ref:`self referential relationship `. 
If we wanted to +query from both the ``Employee`` and ``Manager`` models using a join, at the +SQL level the "employee" table needs to be included twice in the query, which +means it must be aliased. When we create such a join using the SQLAlchemy +ORM, we get SQL that looks like the following: + +.. sourcecode:: pycon+sql + + >>> stmt = select(Employee, Manager).join(Employee.reports_to) + >>> print(stmt) + {opensql}SELECT employee.id, employee.manager_id, employee.name, + employee.type, manager_1.id AS id_1, employee_1.id AS id_2, + employee_1.manager_id AS manager_id_1, employee_1.name AS name_1, + employee_1.type AS type_1 + FROM employee JOIN + (employee AS employee_1 JOIN manager AS manager_1 ON manager_1.id = employee_1.id) + ON manager_1.id = employee.manager_id + +Above, the SQL selects FROM the ``employee`` table, representing the +``Employee`` entity in the query. It then joins to a right-nested join of +``employee AS employee_1 JOIN manager AS manager_1``, where the ``employee`` +table is stated again, except as an anonymous alias ``employee_1``. This is the +"automatic generation of an alias" that the warning message refers towards. + +When SQLAlchemy loads ORM rows that each contain an ``Employee`` and a +``Manager`` object, the ORM must adapt rows from what above is the +``employee_1`` and ``manager_1`` table aliases into those of the un-aliased +``Manager`` class. This process is internally complex and does not accommodate +for all API features, notably when trying to use eager loading features such as +:func:`_orm.contains_eager` with more deeply nested queries than are shown +here. As the pattern is unreliable for more complex scenarios and involves +implicit decisionmaking that is difficult to anticipate and follow, +the warning is emitted and this pattern may be considered a legacy feature. 
The +better way to write this query is to use the same patterns that apply to any +other self-referential relationship, which is to use the :func:`_orm.aliased` +construct explicitly. For joined-inheritance and other join-oriented mappings, +it is usually desirable to add the use of the :paramref:`_orm.aliased.flat` +parameter, which will allow a JOIN of two or more tables to be aliased by +applying an alias to the individual tables within the join, rather than +embedding the join into a new subquery: + +.. sourcecode:: pycon+sql + + >>> from sqlalchemy.orm import aliased + >>> manager_alias = aliased(Manager, flat=True) + >>> stmt = select(Employee, manager_alias).join(Employee.reports_to.of_type(manager_alias)) + >>> print(stmt) + {opensql}SELECT employee.id, employee.manager_id, employee.name, + employee.type, manager_1.id AS id_1, employee_1.id AS id_2, + employee_1.manager_id AS manager_id_1, employee_1.name AS name_1, + employee_1.type AS type_1 + FROM employee JOIN + (employee AS employee_1 JOIN manager AS manager_1 ON manager_1.id = employee_1.id) + ON manager_1.id = employee.manager_id - new_stmt_2 = select(some_table).select_from(some_table.join(subq)) +If we then wanted to use :func:`_orm.contains_eager` to populate the +``reports_to`` attribute, we refer to the alias:: -.. seealso:: + >>> stmt =select(Employee).join( + ... Employee.reports_to.of_type(manager_alias) + ... ).options( + ... contains_eager(Employee.reports_to.of_type(manager_alias)) + ... ) - :ref:`change_4617` +Without using the explicit :func:`_orm.aliased` object, in some more nested +cases the :func:`_orm.contains_eager` option does not have enough context to +know where to get its data from, in the case that the ORM is "auto-aliasing" +in a very nested context. Therefore it's best not to rely on this feature +and instead keep the SQL construction as explicit as possible. Object Relational Mapping @@ -1657,3 +1395,269 @@ See :ref:`orm_exceptions_toplevel` for ORM exception classes. 
+Legacy Exceptions +================= + +Exceptions in this section are not generated by current SQLAlchemy +versions, however are provided here to suit exception message hyperlinks. + +.. _error_b8d9: + +The in SQLAlchemy 2.0 will no longer +-------------------------------------------------------------------------------------------- + +SQLAlchemy 2.0 represents a major shift for a wide variety of key +SQLAlchemy usage patterns in both the Core and ORM components. The goal +of the 2.0 release is to make a slight readjustment in some of the most +fundamental assumptions of SQLAlchemy since its early beginnings, and +to deliver a newly streamlined usage model that is hoped to be significantly +more minimalist and consistent between the Core and ORM components, as well as +more capable. + +Introduced at :ref:`migration_20_toplevel`, the SQLAlchemy 2.0 project includes +a comprehensive future compatibility system that's integrated into the +1.4 series of SQLAlchemy, such that applications will have a clear, +unambiguous, and incremental upgrade path in order to migrate applications to +being fully 2.0 compatible. The :class:`.exc.RemovedIn20Warning` deprecation +warning is at the base of this system to provide guidance on what behaviors in +an existing codebase will need to be modified. An overview of how to enable +this warning is at :ref:`deprecation_20_mode`. + +.. seealso:: + + :ref:`migration_20_toplevel` - An overview of the upgrade process from + the 1.x series, as well as the current goals and progress of SQLAlchemy + 2.0. + + + :ref:`deprecation_20_mode` - specific guidelines on how to use + "2.0 deprecations mode" in SQLAlchemy 1.4. + + +.. _error_s9r1: + +Object is being merged into a Session along the backref cascade +--------------------------------------------------------------- + +This message refers to the "backref cascade" behavior of SQLAlchemy, +removed in version 2.0. 
This refers to the action of +an object being added into a :class:`_orm.Session` as a result of another +object that's already present in that session being associated with it. +As this behavior has been shown to be more confusing than helpful, +the :paramref:`_orm.relationship.cascade_backrefs` and +:paramref:`_orm.backref.cascade_backrefs` parameters were added, which can +be set to ``False`` to disable it, and in SQLAlchemy 2.0 the "cascade backrefs" +behavior has been removed entirely. + +For older SQLAlchemy versions, to set +:paramref:`_orm.relationship.cascade_backrefs` to ``False`` on a backref that +is currently configured using the :paramref:`_orm.relationship.backref` string +parameter, the backref must be declared using the :func:`_orm.backref` function +first so that the :paramref:`_orm.backref.cascade_backrefs` parameter may be +passed. + +Alternatively, the entire "cascade backrefs" behavior can be turned off +across the board by using the :class:`_orm.Session` in "future" mode, +by passing ``True`` for the :paramref:`_orm.Session.future` parameter. + +.. seealso:: + + :ref:`change_5150` - background on the change for SQLAlchemy 2.0. + + +.. _error_c9ae: + +select() construct created in "legacy" mode; keyword arguments, etc. +-------------------------------------------------------------------- + +The :func:`_expression.select` construct has been updated as of SQLAlchemy +1.4 to support the newer calling style that is standard in +SQLAlchemy 2.0. For backwards compatibility within +the 1.4 series, the construct accepts arguments in both the "legacy" style as well +as the "new" style. 
+ +The "new" style features that column and table expressions are passed +positionally to the :func:`_expression.select` construct only; any other +modifiers to the object must be passed using subsequent method chaining:: + + # this is the way to do it going forward + stmt = select(table1.c.myid).where(table1.c.myid == table2.c.otherid) + +For comparison, a :func:`_expression.select` in legacy forms of SQLAlchemy, +before methods like :meth:`.Select.where` were even added, would like:: + + # this is how it was documented in original SQLAlchemy versions + # many years ago + stmt = select([table1.c.myid], whereclause=table1.c.myid == table2.c.otherid) + +Or even that the "whereclause" would be passed positionally:: + + # this is also how it was documented in original SQLAlchemy versions + # many years ago + stmt = select([table1.c.myid], table1.c.myid == table2.c.otherid) + +For some years now, the additional "whereclause" and other arguments that are +accepted have been removed from most narrative documentation, leading to a +calling style that is most familiar as the list of column arguments passed +as a list, but no further arguments:: + + # this is how it's been documented since around version 1.0 or so + stmt = select([table1.c.myid]).where(table1.c.myid == table2.c.otherid) + +The document at :ref:`migration_20_5284` describes this change in terms +of :ref:`2.0 Migration `. + +.. seealso:: + + :ref:`migration_20_5284` + + :ref:`migration_20_toplevel` + +.. _error_c9bf: + +A bind was located via legacy bound metadata, but since future=True is set on this Session, this bind is ignored. +------------------------------------------------------------------------------------------------------------------- + +The concept of "bound metadata" is present up until SQLAlchemy 1.4; as +of SQLAlchemy 2.0 it's been removed. 
+ +This error refers to the :paramref:`_schema.MetaData.bind` parameter on the +:class:`_schema.MetaData` object that in turn allows objects like the ORM +:class:`_orm.Session` to associate a particular mapped class with an +:class:`_orm.Engine`. In SQLAlchemy 2.0, the :class:`_orm.Session` must be +linked to each :class:`_orm.Engine` directly. That is, instead of instantiating +the :class:`_orm.Session` or :class:`_orm.sessionmaker` without any arguments, +and associating the :class:`_engine.Engine` with the +:class:`_schema.MetaData`:: + + engine = create_engine("sqlite://") + Session = sessionmaker() + metadata_obj = MetaData(bind=engine) + Base = declarative_base(metadata=metadata_obj) + + class MyClass(Base): + # ... + + + session = Session() + session.add(MyClass()) + session.commit() + +The :class:`_engine.Engine` must instead be associated directly with the +:class:`_orm.sessionmaker` or :class:`_orm.Session`. The +:class:`_schema.MetaData` object should no longer be associated with any +engine:: + + + engine = create_engine("sqlite://") + Session = sessionmaker(engine) + Base = declarative_base() + + class MyClass(Base): + # ... + + + session = Session() + session.add(MyClass()) + session.commit() + +In SQLAlchemy 1.4, this :term:`2.0 style` behavior is enabled when the +:paramref:`_orm.Session.future` flag is set on :class:`_orm.sessionmaker` +or :class:`_orm.Session`. + + +.. _error_2afi: + +This Compiled object is not bound to any Engine or Connection +------------------------------------------------------------- + +This error refers to the concept of "bound metadata", which is a legacy +SQLAlchemy pattern present only in 1.x versions. 
The issue occurs when one invokes +the :meth:`.Executable.execute` method directly off of a Core expression object +that is not associated with any :class:`_engine.Engine`:: + + metadata_obj = MetaData() + table = Table('t', metadata_obj, Column('q', Integer)) + + stmt = select(table) + result = stmt.execute() # <--- raises + +What the logic is expecting is that the :class:`_schema.MetaData` object has +been **bound** to a :class:`_engine.Engine`:: + + engine = create_engine("mysql+pymysql://user:pass@host/db") + metadata_obj = MetaData(bind=engine) + +Where above, any statement that derives from a :class:`_schema.Table` which +in turn derives from that :class:`_schema.MetaData` will implicitly make use of +the given :class:`_engine.Engine` in order to invoke the statement. + +Note that the concept of bound metadata is **not present in SQLAlchemy 2.0**. +The correct way to invoke statements is via +the :meth:`_engine.Connection.execute` method of a :class:`_engine.Connection`:: + + with engine.connect() as conn: + result = conn.execute(stmt) + +When using the ORM, a similar facility is available via the :class:`.Session`:: + + result = session.execute(stmt) + +.. seealso:: + + :ref:`tutorial_statement_execution` + +.. _error_8s2a: + +This connection is on an inactive transaction. Please rollback() fully before proceeding +------------------------------------------------------------------------------------------ + +This error condition was added to SQLAlchemy as of version 1.4, and does not +apply to SQLAlchemy 2.0. The error +refers to the state where a :class:`_engine.Connection` is placed into a +transaction using a method like :meth:`_engine.Connection.begin`, and then a +further "marker" transaction is created within that scope; the "marker" +transaction is then rolled back using :meth:`.Transaction.rollback` or closed +using :meth:`.Transaction.close`, however the outer transaction is still +present in an "inactive" state and must be rolled back. 
+ +The pattern looks like:: + + engine = create_engine(...) + + connection = engine.connect() + transaction1 = connection.begin() + + # this is a "sub" or "marker" transaction, a logical nesting + # structure based on "real" transaction transaction1 + transaction2 = connection.begin() + transaction2.rollback() + + # transaction1 is still present and needs explicit rollback, + # so this will raise + connection.execute(text("select 1")) + +Above, ``transaction2`` is a "marker" transaction, which indicates a logical +nesting of transactions within an outer one; while the inner transaction +can roll back the whole transaction via its rollback() method, its commit() +method has no effect except to close the scope of the "marker" transaction +itself. The call to ``transaction2.rollback()`` has the effect of +**deactivating** transaction1 which means it is essentially rolled back +at the database level, however is still present in order to accommodate +a consistent nesting pattern of transactions. + +The correct resolution is to ensure the outer transaction is also +rolled back:: + + transaction1.rollback() + +This pattern is not commonly used in Core. Within the ORM, a similar issue can +occur which is the product of the ORM's "logical" transaction structure; this +is described in the FAQ entry at :ref:`faq_session_rollback`. + +The "subtransaction" pattern is removed in SQLAlchemy 2.0 so that this +particular programming pattern is no longer be available, preventing +this error message. + + + diff --git a/doc/build/faq/connections.rst b/doc/build/faq/connections.rst index 504c47485b6..27ba5f4ed5c 100644 --- a/doc/build/faq/connections.rst +++ b/doc/build/faq/connections.rst @@ -167,18 +167,12 @@ a new transaction when it is first used that remains in effect for subsequent statements, until the DBAPI-level ``connection.commit()`` or ``connection.rollback()`` method is invoked. 
-As discussed at :ref:`autocommit`, there is a library level "autocommit" -feature which is deprecated in 1.4 that causes :term:`DML` and :term:`DDL` -executions to commit automatically after individual statements are executed; -however, outside of this deprecated case, modern use of SQLAlchemy works with -this transaction in all cases and does not commit any data unless explicitly -told to commit. - -At the ORM level, a similar situation where the ORM -:class:`_orm.Session` object also presents a legacy "autocommit" operation is -present; however even if this legacy mode of operation is used, the -:class:`_orm.Session` still makes use of transactions internally, -particularly within the :meth:`_orm.Session.flush` process. +In modern use of SQLAlchemy, a series of SQL statements are always invoked +within this transactional state, assuming +:ref:`DBAPI autocommit mode ` is not enabled (more on that in +the next section), meaning that no single statement is automatically committed; +if an operation fails, the effects of all statements within the current +transaction will be lost. The implication that this has for the notion of "retrying" a statement is that in the default case, when a connection is lost, **the entire transaction is @@ -188,9 +182,10 @@ SQLAlchemy does not have a transparent "reconnection" feature that works mid-transaction, for the case when the database connection has disconnected while being used. The canonical approach to dealing with mid-operation disconnects is to **retry the entire operation from the start of the -transaction**, often by using a Python "retry" decorator, or to otherwise +transaction**, often by using a custom Python decorator that will +"retry" a particular function several times until it succeeds, or to otherwise architect the application in such a way that it is resilient against -transactions that are dropped. +transactions that are dropped that then cause operations to fail. 
There is also the notion of extensions that can keep track of all of the statements that have proceeded within a transaction and then replay them all in diff --git a/doc/build/faq/sessions.rst b/doc/build/faq/sessions.rst index dc1336dad00..1145a408fad 100644 --- a/doc/build/faq/sessions.rst +++ b/doc/build/faq/sessions.rst @@ -350,7 +350,7 @@ How Do I use Textual SQL with ORM Queries? See: -* :ref:`orm_tutorial_literal_sql` - Ad-hoc textual blocks with :class:`_query.Query` +* :ref:`orm_queryguide_selecting_text` - Ad-hoc textual blocks with :class:`_query.Query` * :ref:`session_sql_expressions` - Using :class:`.Session` with textual SQL directly. diff --git a/doc/build/glossary.rst b/doc/build/glossary.rst index 1e663502414..b9b0002e844 100644 --- a/doc/build/glossary.rst +++ b/doc/build/glossary.rst @@ -158,7 +158,7 @@ Glossary `bind parameters `_ - at Use The Index, Luke! - + :ref:`tutorial_sending_parameters` - in the :ref:`unified_tutorial` selectable A term used in SQLAlchemy to describe a SQL construct that represents diff --git a/doc/build/orm/extensions/baked.rst b/doc/build/orm/extensions/baked.rst index f22e28fa5ac..b3c21716a2a 100644 --- a/doc/build/orm/extensions/baked.rst +++ b/doc/build/orm/extensions/baked.rst @@ -475,4 +475,5 @@ API Documentation .. autoclass:: Result :members: + :noindex: diff --git a/doc/build/orm/extensions/hybrid.rst b/doc/build/orm/extensions/hybrid.rst index 16cdafebcca..96214845937 100644 --- a/doc/build/orm/extensions/hybrid.rst +++ b/doc/build/orm/extensions/hybrid.rst @@ -15,7 +15,7 @@ API Reference :members: .. autoclass:: Comparator - + .. 
autodata:: HYBRID_METHOD diff --git a/doc/build/orm/extensions/mypy.rst b/doc/build/orm/extensions/mypy.rst index 368c151009b..0d808f5c8aa 100644 --- a/doc/build/orm/extensions/mypy.rst +++ b/doc/build/orm/extensions/mypy.rst @@ -4,7 +4,7 @@ Mypy / Pep-484 Support for ORM Mappings ======================================== Support for :pep:`484` typing annotations as well as the -`Mypy `_ type checking tool. +MyPy_ type checking tool. .. topic:: SQLAlchemy Mypy Plugin Status Update @@ -59,7 +59,7 @@ The Mypy plugin depends upon new stubs for SQLAlchemy packaged at `sqlalchemy2-stubs `_. These stubs necessarily fully replace the previous ``sqlalchemy-stubs`` typing annotations published by Dropbox, as they occupy the same ``sqlalchemy-stubs`` -namespace as specified by :pep:`561`. The `Mypy `_ +namespace as specified by :pep:`561`. The Mypy_ package itself is also a dependency. Both packages may be installed using the "mypy" extras hook using pip:: @@ -595,3 +595,5 @@ With the above recipe, the attributes listed in ``_mypy_mapped_attrs`` will be applied with the :class:`_orm.Mapped` typing information so that the ``User`` class will behave as a SQLAlchemy mapped class when used in a class-bound context. + +.. _Mypy: https://mypy.readthedocs.io/ diff --git a/doc/build/orm/persistence_techniques.rst b/doc/build/orm/persistence_techniques.rst index 9815605b2ce..112ac5a319f 100644 --- a/doc/build/orm/persistence_techniques.rst +++ b/doc/build/orm/persistence_techniques.rst @@ -988,7 +988,7 @@ Comparison to Core Insert / Update Constructs The bulk methods offer performance that under particular circumstances can be close to that of using the core :class:`_expression.Insert` and :class:`_expression.Update` constructs in an "executemany" context (for a description -of "executemany", see :ref:`execute_multiple` in the Core tutorial). +of "executemany", see :ref:`tutorial_multiple_parameters` in the Core tutorial). 
In order to achieve this, the :paramref:`.Session.bulk_insert_mappings.return_defaults` flag should be disabled so that rows can be batched together. The example diff --git a/doc/build/orm/queryguide.rst b/doc/build/orm/queryguide.rst index d176087a883..012206ba7c5 100644 --- a/doc/build/orm/queryguide.rst +++ b/doc/build/orm/queryguide.rst @@ -295,6 +295,7 @@ The :class:`_orm.aliased` construct is also central to making use of subqueries with the ORM; the sections :ref:`orm_queryguide_subqueries` and :ref:`orm_queryguide_join_subqueries` discusses this further. + .. _orm_queryguide_selecting_text: Getting ORM Results from Textual and Core Statements @@ -477,7 +478,6 @@ and order by criteria based on its exported columns:: :ref:`tutorial_orm_union` - in the :ref:`unified_tutorial` - .. _orm_queryguide_joins: Joins diff --git a/doc/build/orm/relationships.rst b/doc/build/orm/relationships.rst index 8a4fe36a1d8..b9111741ccf 100644 --- a/doc/build/orm/relationships.rst +++ b/doc/build/orm/relationships.rst @@ -7,7 +7,7 @@ Relationship Configuration This section describes the :func:`relationship` function and in depth discussion of its usage. For an introduction to relationships, start with the -:ref:`ormtutorial_toplevel` and head into :ref:`orm_tutorial_relationship`. +:ref:`ormtutorial_toplevel` and head into :ref:`tutorial_orm_related_objects`. .. toctree:: :maxdepth: 3 diff --git a/doc/build/orm/self_referential.rst b/doc/build/orm/self_referential.rst index 2f1c021020b..71b7a06efd6 100644 --- a/doc/build/orm/self_referential.rst +++ b/doc/build/orm/self_referential.rst @@ -137,7 +137,7 @@ the foreign key from one level of the tree to the next. In SQL, a join from a table to itself requires that at least one side of the expression be "aliased" so that it can be unambiguously referred to. 
-Recall from :ref:`ormtutorial_aliases` in the ORM tutorial that the +Recall from :ref:`orm_queryguide_orm_aliases` in the ORM tutorial that the :func:`_orm.aliased` construct is normally used to provide an "alias" of an ORM entity. Joining from ``Node`` to itself using this technique looks like: diff --git a/doc/build/orm/session_basics.rst b/doc/build/orm/session_basics.rst index bed901712d5..b747246c042 100644 --- a/doc/build/orm/session_basics.rst +++ b/doc/build/orm/session_basics.rst @@ -708,7 +708,7 @@ values for ``synchronize_session`` are supported: automatically. If the operation is against multiple tables, typically individual UPDATE / DELETE statements against the individual tables should be used. Some databases support multiple table UPDATEs. - Similar guidelines as those detailed at :ref:`multi_table_updates` + Similar guidelines as those detailed at :ref:`tutorial_update_from` may be applied. * The WHERE criteria needed in order to limit the polymorphic identity to diff --git a/doc/build/tutorial/data_insert.rst b/doc/build/tutorial/data_insert.rst index 90180154b7d..63aeb51a089 100644 --- a/doc/build/tutorial/data_insert.rst +++ b/doc/build/tutorial/data_insert.rst @@ -8,6 +8,7 @@ .. rst-class:: core-header + .. _tutorial_core_insert: Inserting Rows with Core diff --git a/doc/build/tutorial/data_select.rst b/doc/build/tutorial/data_select.rst index c8fac288e62..78a0f174618 100644 --- a/doc/build/tutorial/data_select.rst +++ b/doc/build/tutorial/data_select.rst @@ -248,7 +248,7 @@ when referring to arbitrary SQL expressions in a result row by name: :ref:`tutorial_order_by_label` - the label names we create may also be referred towards in the ORDER BY or GROUP BY clause of the :class:`_sql.Select`. -.. _tutorial_select_arbtrary_text: +.. 
_tutorial_select_arbitrary_text: Selecting with Textual Column Expressions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1050,6 +1050,75 @@ The statement then can return the data for this column like any other: ('sandy', 'sandy@squirrelpower.org', 2)] {opensql}ROLLBACK{stop} + +.. _tutorial_lateral_correlation: + +LATERAL correlation +~~~~~~~~~~~~~~~~~~~ + +LATERAL correlation is a special sub-category of SQL correlation which +allows a selectable unit to refer to another selectable unit within a +single FROM clause. This is an extremely special use case which, while +part of the SQL standard, is only known to be supported by recent +versions of PostgreSQL. + +Normally, if a SELECT statement refers to +``table1 JOIN (SELECT ...) AS subquery`` in its FROM clause, the subquery +on the right side may not refer to the "table1" expression from the left side; +correlation may only refer to a table that is part of another SELECT that +entirely encloses this SELECT. The LATERAL keyword allows us to turn this +behavior around and allow correlation from the right side JOIN. + +SQLAlchemy supports this feature using the :meth:`_expression.Select.lateral` +method, which creates an object known as :class:`.Lateral`. :class:`.Lateral` +is in the same family as :class:`.Subquery` and :class:`.Alias`, but also +includes correlation behavior when the construct is added to the FROM clause of +an enclosing SELECT. The following example illustrates a SQL query that makes +use of LATERAL, selecting the "user account / count of email address" data as +was discussed in the previous section:: + + >>> subq = ( + ... select( + ... func.count(address_table.c.id).label("address_count"), + ... address_table.c.email_address, + ... address_table.c.user_id, + ... ). + ... where(user_table.c.id == address_table.c.user_id). + ... lateral() + ... ) + >>> stmt = select( + ... user_table.c.name, + ... subq.c.address_count, + ... subq.c.email_address + ... ).\ + ... join_from(user_table, subq).\ + ... 
order_by(user_table.c.id, subq.c.email_address) + >>> print(stmt) + {opensql}SELECT user_account.name, anon_1.address_count, anon_1.email_address + FROM user_account + JOIN LATERAL (SELECT count(address.id) AS address_count, + address.email_address AS email_address, address.user_id AS user_id + FROM address + WHERE user_account.id = address.user_id) AS anon_1 + ON user_account.id = anon_1.user_id + ORDER BY user_account.id, anon_1.email_address + +Above, the right side of the JOIN is a subquery that correlates to the +``user_account`` table that's on the left side of the join. + +When using :meth:`_expression.Select.lateral`, the behavior of +:meth:`_expression.Select.correlate` and +:meth:`_expression.Select.correlate_except` methods is applied to the +:class:`.Lateral` construct as well. + +.. seealso:: + + :class:`_expression.Lateral` + + :meth:`_expression.Select.lateral` + + + .. _tutorial_union: UNION, UNION ALL and other set operations @@ -1258,6 +1327,7 @@ clause: [('patrick',)] {opensql}ROLLBACK{stop} + .. _tutorial_functions: Working with SQL Functions @@ -1577,8 +1647,8 @@ using the :meth:`_functions.FunctionElement.filter` method:: count(address.email_address) FILTER (WHERE user_account.name = ?) AS anon_2 FROM user_account JOIN address ON user_account.id = address.user_id [...] ('sandy', 'spongebob') - [(2, 1)] - ROLLBACK + {stop}[(2, 1)] + {opensql}ROLLBACK .. _tutorial_functions_table_valued: @@ -1614,16 +1684,16 @@ modern versions of SQLite:: >>> onetwothree = func.json_each('["one", "two", "three"]').table_valued("value") >>> stmt = select(onetwothree).where(onetwothree.c.value.in_(["two", "three"])) - >>> with engine.connect() as conn: # doctest:+SKIP + >>> with engine.connect() as conn: ... result = conn.execute(stmt) - ... print(result.all()) + ... result.all() {opensql}BEGIN (implicit) SELECT anon_1.value FROM json_each(?) AS anon_1 WHERE anon_1.value IN (?, ?) [...] 
('["one", "two", "three"]', 'two', 'three') - [('two',), ('three',)] - ROLLBACK + {stop}[('two',), ('three',)] + {opensql}ROLLBACK{stop} Above, we used the ``json_each()`` JSON function supported by SQLite and PostgreSQL to generate a table valued expression with a single column referred @@ -1671,4 +1741,78 @@ it is usable for custom SQL functions:: :ref:`postgresql_column_valued` - in the :ref:`postgresql_toplevel` documentation. +.. _tutorial_casts: + +Data Casts and Type Coercion +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In SQL, we often need to indicate the datatype of an expression explicitly, +either to tell the database what type is expected in an otherwise ambiguous +expression, or in some cases when we want to convert the implied datatype +of a SQL expression into something else. The SQL CAST keyword is used for +this task, which in SQLAlchemy is provided by the :func:`.cast` function. +This function accepts a column expression and a data type +object as arguments, as demonstrated below where we produce a SQL expression +``CAST(user_account.id AS VARCHAR)`` from the ``user_table.c.id`` column +object:: + + >>> from sqlalchemy import cast + >>> stmt = select(cast(user_table.c.id, String)) + >>> with engine.connect() as conn: + ... result = conn.execute(stmt) + ... result.all() + {opensql}BEGIN (implicit) + SELECT CAST(user_account.id AS VARCHAR) AS id + FROM user_account + [...] () + {stop}[('1',), ('2',), ('3',)] + {opensql}ROLLBACK{stop} + +The :func:`.cast` function not only renders the SQL CAST syntax, it also +produces a SQLAlchemy column expression that will act as the given datatype on +the Python side as well. 
A string expression that is :func:`.cast` to +:class:`_sqltypes.JSON` will gain JSON subscript and comparison operators, +for example:: + + >>> from sqlalchemy import JSON + >>> print(cast("{'a': 'b'}", JSON)["a"]) + CAST(:param_1 AS JSON)[:param_2] + + +type_coerce() - a Python-only "cast" +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Sometimes there is the need to have SQLAlchemy know the datatype of an +expression, for all the reasons mentioned above, but to not render the CAST +expression itself on the SQL side, where it may interfere with a SQL operation +that already works without it. For this fairly common use case there is +another function :func:`.type_coerce` which is closely related to +:func:`.cast`, in that it sets up a Python expression as having a specific SQL +database type, but does not render the ``CAST`` keyword or datatype on the +database side. :func:`.type_coerce` is particularly important when dealing +with the :class:`_types.JSON` datatype, which typically has an intricate +relationship with string-oriented datatypes on different platforms and +may not even be an explicit datatype, such as on SQLite and MariaDB. +Below, we use :func:`.type_coerce` to deliver a Python structure as a JSON +string into one of MySQL's JSON functions: + +.. sourcecode:: pycon+sql + + >>> import json + >>> from sqlalchemy import JSON + >>> from sqlalchemy import type_coerce + >>> from sqlalchemy.dialects import mysql + >>> s = select( + ... type_coerce( + ... {'some_key': {'foo': 'bar'}}, JSON + ... )['some_key'] + ... ) + >>> print(s.compile(dialect=mysql.dialect())) + SELECT JSON_EXTRACT(%s, %s) AS anon_1 + +Above, MySQL's ``JSON_EXTRACT`` SQL function was invoked +because we used :func:`.type_coerce` to indicate that our Python dictionary +should be treated as :class:`_types.JSON`.
The Python ``__getitem__`` +operator, ``['some_key']`` in this case, became available as a result and +allowed a ``JSON_EXTRACT`` path expression (not shown, however in this +case it would ultimately be ``'$."some_key"'``) to be rendered. diff --git a/doc/build/tutorial/data_update.rst b/doc/build/tutorial/data_update.rst index 8813dda9889..1091bccf645 100644 --- a/doc/build/tutorial/data_update.rst +++ b/doc/build/tutorial/data_update.rst @@ -175,6 +175,8 @@ order to refer to additional tables:: WHERE user_account.id = address.user_id AND address.email_address = %s +.. _tutorial_parameter_ordered_updates: + Parameter Ordered Updates ~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/build/tutorial/dbapi_transactions.rst b/doc/build/tutorial/dbapi_transactions.rst index a9dff8f3851..d7ac0b87c42 100644 --- a/doc/build/tutorial/dbapi_transactions.rst +++ b/doc/build/tutorial/dbapi_transactions.rst @@ -179,6 +179,7 @@ purposes. .. rst-class:: core-header +.. _tutorial_statement_execution: Basics of Statement Execution ----------------------------- diff --git a/doc/build/tutorial/orm_related_objects.rst b/doc/build/tutorial/orm_related_objects.rst index 59691cf818d..2eacc39e369 100644 --- a/doc/build/tutorial/orm_related_objects.rst +++ b/doc/build/tutorial/orm_related_objects.rst @@ -5,6 +5,7 @@ .. include:: tutorial_nav_include.rst + .. _tutorial_orm_related_objects: Working with Related Objects @@ -129,6 +130,9 @@ of the ``Address.user`` attribute after the fact:: # equivalent effect as a2 = Address(user=u1) >>> a2.user = u1 + +.. 
_tutorial_orm_cascades: + Cascading Objects into the Session ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index e4d89b2dce2..26af6eb799f 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -560,7 +560,7 @@ def connect(dbapi_connection, connection_record): as well as a list of 2-tuples, which will automatically provide a parameter-ordered UPDATE statement in a manner similar to that described -at :ref:`updates_order_parameters`. Unlike the :class:`_expression.Update` +at :ref:`tutorial_parameter_ordered_updates`. Unlike the :class:`_expression.Update` object, no special flag is needed to specify the intent since the argument form in this context is unambiguous: diff --git a/lib/sqlalchemy/dialects/mysql/dml.py b/lib/sqlalchemy/dialects/mysql/dml.py index 790733cbfda..0b508fe49de 100644 --- a/lib/sqlalchemy/dialects/mysql/dml.py +++ b/lib/sqlalchemy/dialects/mysql/dml.py @@ -97,7 +97,7 @@ def on_duplicate_key_update(self, *args, **kw): in the UPDATE clause should be ordered as sent, in a manner similar to that described for the :class:`_expression.Update` construct overall - in :ref:`updates_order_parameters`:: + in :ref:`tutorial_parameter_ordered_updates`:: insert().on_duplicate_key_update( [("name", "some name"), ("value", "some value")]) diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py index f7121a82a1a..663a17b2aae 100644 --- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py +++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py @@ -208,7 +208,7 @@ SQLAlchemy internally makes use of these extensions for ``executemany()`` style calls, which correspond to lists of parameters being passed to :meth:`_engine.Connection.execute` as detailed in :ref:`multiple parameter -sets `.
The ORM also uses this mode internally whenever possible. The two available extensions on the psycopg2 side are the ``execute_values()`` @@ -284,7 +284,7 @@ .. seealso:: - :ref:`execute_multiple` - General information on using the + :ref:`tutorial_multiple_parameters` - General information on using the :class:`_engine.Connection` object to execute statements in such a way as to make use of the DBAPI ``.executemany()`` method. diff --git a/lib/sqlalchemy/engine/row.py b/lib/sqlalchemy/engine/row.py index c76632db1c0..e6c32977391 100644 --- a/lib/sqlalchemy/engine/row.py +++ b/lib/sqlalchemy/engine/row.py @@ -191,7 +191,7 @@ class Row(BaseRow, collections_abc.Sequence): .. seealso:: - :ref:`coretutorial_selecting` - includes examples of selecting + :ref:`tutorial_selecting_data` - includes examples of selecting rows from SELECT statements. :class:`.LegacyRow` - Compatibility interface introduced in SQLAlchemy diff --git a/lib/sqlalchemy/engine/url.py b/lib/sqlalchemy/engine/url.py index 5c46676f77c..db971c2ab50 100644 --- a/lib/sqlalchemy/engine/url.py +++ b/lib/sqlalchemy/engine/url.py @@ -67,13 +67,7 @@ class URL( * :attr:`_engine.URL.drivername`: database backend and driver name, such as ``postgresql+psycopg2`` * :attr:`_engine.URL.username`: username string - * :attr:`_engine.URL.password`: password string, or object that includes - a ``__str__()`` method that produces a password. - - .. note:: A password-producing object will be stringified only - **once** per :class:`_engine.Engine` object. For dynamic password - generation per connect, see :ref:`engines_dynamic_tokens`. 
- + * :attr:`_engine.URL.password`: password string * :attr:`_engine.URL.host`: string hostname * :attr:`_engine.URL.port`: integer port number * :attr:`_engine.URL.database`: string database name diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index 88910ba0624..ba5e448acd3 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -3239,9 +3239,8 @@ def update(self, values, synchronize_session="evaluate", update_args=None): :param values: a dictionary with attributes names, or alternatively mapped attributes or SQL expressions, as keys, and literal values or sql expressions as values. If :ref:`parameter-ordered - mode ` is desired, the values can be - passed as a list of 2-tuples; - this requires that the + mode ` is desired, the values can + be passed as a list of 2-tuples; this requires that the :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` flag is passed to the :paramref:`.Query.update.update_args` dictionary as well. diff --git a/lib/sqlalchemy/orm/relationships.py b/lib/sqlalchemy/orm/relationships.py index efa6c63f017..f58277e32ea 100644 --- a/lib/sqlalchemy/orm/relationships.py +++ b/lib/sqlalchemy/orm/relationships.py @@ -212,7 +212,7 @@ class Parent(Base): :ref:`relationship_config_toplevel` - Full introductory and reference documentation for :func:`_orm.relationship`. - :ref:`orm_tutorial_relationship` - ORM tutorial introduction. + :ref:`tutorial_orm_related_objects` - ORM tutorial introduction. :param argument: A mapped class, or actual :class:`_orm.Mapper` instance, @@ -279,9 +279,6 @@ class name or dotted package-qualified name. :ref:`relationships_many_to_many` - Reference example of "many to many". - :ref:`orm_tutorial_many_to_many` - ORM tutorial introduction to - many-to-many relationships. - :ref:`self_referential_many_to_many` - Specifics on using many-to-many in a self-referential case. @@ -392,9 +389,6 @@ class name or dotted package-qualified name. 
:ref:`unitofwork_cascades` - Full detail on each of the available cascade options. - :ref:`tutorial_delete_cascade` - Tutorial example describing - a delete cascade. - :param cascade_backrefs=True: A boolean value indicating if the ``save-update`` cascade should operate along an assignment event intercepted by a backref. @@ -1535,7 +1529,7 @@ def contains(self, other, **kwargs): See :meth:`~.RelationshipProperty.Comparator.any` for a less-performant alternative using EXISTS, or refer to :meth:`_query.Query.outerjoin` - as well as :ref:`ormtutorial_joins` + as well as :ref:`orm_queryguide_joins` for more details on constructing outer joins. kwargs may be ignored by this operator but are required for API diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index 4afcd0fb862..a5c571ad75d 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -1255,8 +1255,6 @@ def aliased(element, alias=None, name=None, flat=False, adapt_on_names=False): :ref:`orm_queryguide_orm_aliases` - in the :ref:`queryguide_toplevel` - :ref:`ormtutorial_aliases` - in the legacy :ref:`ormtutorial_toplevel` - :param element: element to be aliased. Is normally a mapped class, but for convenience can also be a :class:`_expression.FromClause` element. diff --git a/lib/sqlalchemy/sql/dml.py b/lib/sqlalchemy/sql/dml.py index 4f3280373bd..dea5d6119df 100644 --- a/lib/sqlalchemy/sql/dml.py +++ b/lib/sqlalchemy/sql/dml.py @@ -703,7 +703,7 @@ def values(self, *args, **kwargs): .. seealso:: - :ref:`execute_multiple` - an introduction to + :ref:`tutorial_multiple_parameters` - an introduction to the traditional Core method of multiple parameter set invocation for INSERTs and other statements. @@ -974,9 +974,6 @@ def __init__( .. seealso:: - :ref:`coretutorial_insert_expressions` - in the - :ref:`1.x tutorial ` - :ref:`tutorial_core_insert` - in the :ref:`unified_tutorial` @@ -1018,9 +1015,7 @@ def __init__( .. 
seealso:: - :ref:`coretutorial_insert_expressions` - SQL Expression Tutorial - - :ref:`inserts_and_updates` - SQL Expression Tutorial + :ref:`tutorial_core_insert` - in the :ref:`unified_tutorial` """ super(Insert, self).__init__(table, values, prefixes) @@ -1135,16 +1130,6 @@ def where(self, *whereclause): .. seealso:: - **1.x Tutorial Examples** - - :ref:`tutorial_1x_correlated_updates` - - :ref:`multi_table_updates` - - :ref:`multi_table_deletes` - - **2.0 Tutorial Examples** - :ref:`tutorial_correlated_updates` :ref:`tutorial_update_from` @@ -1276,15 +1261,6 @@ def __init__( :meth:`_expression.TableClause.update` method on :class:`_schema.Table`. - .. seealso:: - - :ref:`inserts_and_updates` - in the - :ref:`1.x tutorial ` - - :ref:`tutorial_core_update_delete` - in the :ref:`unified_tutorial` - - - :param table: A :class:`_schema.Table` object representing the database table to be updated. @@ -1396,7 +1372,7 @@ def ordered_values(self, *args): .. seealso:: - :ref:`updates_order_parameters` - full example of the + :ref:`tutorial_parameter_ordered_updates` - full example of the :meth:`_expression.Update.ordered_values` method. .. versionchanged:: 1.4 The :meth:`_expression.Update.ordered_values` diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index 0462b26482f..42ec3e0e7d2 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -148,7 +148,7 @@ def literal(value, type_=None): def outparam(key, type_=None): - """Create an 'OUT' parameter for usage in functions (stored procedures), + r"""Create an 'OUT' parameter for usage in functions (stored procedures), for databases which support them. The ``outparam`` can be used like a regular function parameter. @@ -998,7 +998,7 @@ def cast(self, type_): .. seealso:: - :ref:`coretutorial_casts` + :ref:`tutorial_casts` :func:`_expression.cast` @@ -1465,15 +1465,6 @@ def __init__( .. versionchanged:: 1.3 the "expanding" bound parameter feature now supports empty lists. 
- - .. seealso:: - - :ref:`coretutorial_bind_param` - - :ref:`coretutorial_insert_expressions` - - :func:`.outparam` - :param literal_execute: if True, the bound parameter will be rendered in the compile phase with a special "POSTCOMPILE" token, and the SQLAlchemy compiler will @@ -1495,6 +1486,11 @@ def __init__( :ref:`change_4808`. + .. seealso:: + + :ref:`tutorial_sending_parameters` - in the + :ref:`unified_tutorial` + """ if required is NO_ARG: required = value is NO_ARG and callable_ is None @@ -1906,7 +1902,7 @@ def _create_text(cls, text, bind=None): .. seealso:: - :ref:`sqlexpression_text` - in the Core tutorial + :ref:`tutorial_select_arbitrary_text` """ @@ -3057,7 +3053,7 @@ class Cast(WrapsColumnExpression, ColumnElement): .. seealso:: - :ref:`coretutorial_casts` + :ref:`tutorial_casts` :func:`.cast` @@ -3118,7 +3114,7 @@ def __init__(self, expression, type_): .. seealso:: - :ref:`coretutorial_casts` + :ref:`tutorial_casts` :func:`.type_coerce` - an alternative to CAST that coerces the type on the Python side only, which is often sufficient to generate the @@ -3239,7 +3235,7 @@ def __init__(self, expression, type_): .. seealso:: - :ref:`coretutorial_casts` + :ref:`tutorial_casts` :func:`.cast` @@ -4881,7 +4877,7 @@ def __init__(self, text, type_=None, is_literal=False, _selectable=None): :func:`_expression.text` - :ref:`sqlexpression_literal_column` + :ref:`tutorial_select_arbitrary_text` """ self.key = self.name = text diff --git a/lib/sqlalchemy/sql/functions.py b/lib/sqlalchemy/sql/functions.py index 584782b2818..963108d7c4c 100644 --- a/lib/sqlalchemy/sql/functions.py +++ b/lib/sqlalchemy/sql/functions.py @@ -71,7 +71,7 @@ class FunctionElement(Executable, ColumnElement, FromClause, Generative): .. seealso:: - :ref:`coretutorial_functions` - in the Core tutorial + :ref:`tutorial_functions` - in the :ref:`unified_tutorial` :class:`.Function` - named SQL function. @@ -800,7 +800,7 @@ class _FunctionGenerator(object): .. 
seealso:: - :ref:`coretutorial_functions` - in the Core Tutorial + :ref:`tutorial_functions` - in the :ref:`unified_tutorial` :class:`.Function` diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index 353f37b2540..829f26030c4 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -170,7 +170,7 @@ def lateral(self, name=None): .. seealso:: - :ref:`lateral_selects` - overview of usage. + :ref:`tutorial_lateral_correlation` - overview of usage. """ return Lateral._construct(self, name) @@ -607,7 +607,7 @@ def alias(self, name=None, flat=False): .. seealso:: - :ref:`core_tutorial_aliases` + :ref:`tutorial_using_aliases` :func:`_expression.alias` @@ -1920,7 +1920,7 @@ class Lateral(AliasedReturnsRows): .. seealso:: - :ref:`lateral_selects` - overview of usage. + :ref:`tutorial_lateral_correlation` - overview of usage. """ @@ -1947,7 +1947,8 @@ def _factory(cls, selectable, name=None): .. seealso:: - :ref:`lateral_selects` - overview of usage. + :ref:`tutorial_lateral_correlation` - overview of usage. + """ return coercions.expect( @@ -2129,7 +2130,7 @@ def alias(self, name=None, flat=False): .. seealso:: - :ref:`core_tutorial_aliases` + :ref:`tutorial_using_aliases` :func:`_expression.alias` @@ -2964,7 +2965,7 @@ def alias(self, name, **kw): .. seealso:: - :ref:`core_tutorial_aliases` + :ref:`tutorial_using_aliases` :func:`_expression.alias` @@ -3201,8 +3202,6 @@ def scalar_subquery(self): :ref:`tutorial_scalar_subquery` - in the 2.0 tutorial - :ref:`scalar_selects` - in the 1.x tutorial - """ if self._label_style is not LABEL_STYLE_NONE: self = self.set_label_style(LABEL_STYLE_NONE) @@ -3230,7 +3229,7 @@ def lateral(self, name=None): .. seealso:: - :ref:`lateral_selects` - overview of usage. + :ref:`tutorial_lateral_correlation` - overview of usage. 
""" return Lateral._factory(self, name) @@ -4848,8 +4847,6 @@ class Select( :func:`_sql.select` - :ref:`coretutorial_selecting` - in the 1.x tutorial - :ref:`tutorial_selecting_data` - in the 2.0 tutorial """ @@ -4956,8 +4953,7 @@ def create_legacy_select( .. seealso:: - :ref:`coretutorial_selecting` - Core Tutorial description of - :func:`_expression.select`. + :ref:`tutorial_selecting_data` - in the :ref:`unified_tutorial` :param columns: A list of :class:`_expression.ColumnElement` or @@ -6078,7 +6074,7 @@ def correlate(self, *fromclauses): :meth:`_expression.Select.correlate_except` - :ref:`correlated_subqueries` + :ref:`tutorial_scalar_subquery` """ @@ -6116,7 +6112,7 @@ def correlate_except(self, *fromclauses): :meth:`_expression.Select.correlate` - :ref:`correlated_subqueries` + :ref:`tutorial_scalar_subquery` """ @@ -6559,8 +6555,6 @@ class ScalarSelect(roles.InElementRole, Generative, Grouping): :ref:`tutorial_scalar_subquery` - in the 2.0 tutorial - :ref:`scalar_selects` - in the 1.x tutorial - """ _from_objects = [] @@ -6619,8 +6613,6 @@ def correlate(self, *fromclauses): :ref:`tutorial_scalar_subquery` - in the 2.0 tutorial - :ref:`correlated_subqueries` - in the 1.x tutorial - """ self.element = self.element.correlate(*fromclauses) @@ -6652,8 +6644,6 @@ def correlate_except(self, *fromclauses): :ref:`tutorial_scalar_subquery` - in the 2.0 tutorial - :ref:`correlated_subqueries` - in the 1.x tutorial - """ From b698d512ad9568520b4ca433d8a8b8eb2672fdd8 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 7 Jun 2022 09:40:26 -0400 Subject: [PATCH 264/632] graceful degrade for FKs not reflectable Fixed bugs involving the :paramref:`.Table.include_columns` and the :paramref:`.Table.resolve_fks` parameters on :class:`.Table`; these little-used parameters were apparently not working for columns that refer to foreign key constraints. 
In the first case, not-included columns that refer to foreign keys would still attempt to create a :class:`.ForeignKey` object, producing errors when attempting to resolve the columns for the foreign key constraint within reflection; foreign key constraints that refer to skipped columns are now omitted from the table reflection process in the same way as occurs for :class:`.Index` and :class:`.UniqueConstraint` objects with the same conditions. No warning is produced however, as we likely want to remove the include_columns warnings for all constraints in 2.0. In the latter case, the production of table aliases or subqueries would fail on an FK related table not found despite the presence of ``resolve_fks=False``; the logic has been repaired so that if a related table is not found, the :class:`.ForeignKey` object is still proxied to the aliased table or subquery (these :class:`.ForeignKey` objects are normally used in the production of join conditions), but it is sent with a flag that it's not resolvable. The aliased table / subquery will then work normally, with the exception that it cannot be used to generate a join condition automatically, as the foreign key information is missing. This was already the behavior for such foreign key constraints produced using non-reflection methods, such as joining :class:`.Table` objects from different :class:`.MetaData` collections. 
Fixes: #8100 Fixes: #8101 Change-Id: Ifa37a91bd1f1785fca85ef163eec031660d9ea4d (cherry picked from commit 40e3c0da5be7dd526866bfc63590fc5621a9bd6e) --- doc/build/changelog/unreleased_14/8100.rst | 30 ++++ lib/sqlalchemy/engine/reflection.py | 12 +- lib/sqlalchemy/sql/schema.py | 30 +++- lib/sqlalchemy/sql/util.py | 2 +- test/engine/test_reflection.py | 190 +++++++++++++++++---- test/sql/test_selectable.py | 62 ++++++- 6 files changed, 278 insertions(+), 48 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8100.rst diff --git a/doc/build/changelog/unreleased_14/8100.rst b/doc/build/changelog/unreleased_14/8100.rst new file mode 100644 index 00000000000..7c5fc49aa82 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8100.rst @@ -0,0 +1,30 @@ +.. change:: + :tags: bug, reflection + :tickets: 8100, 8101 + + Fixed bugs involving the :paramref:`.Table.include_columns` and the + :paramref:`.Table.resolve_fks` parameters on :class:`.Table`; these + little-used parameters were apparently not working for columns that refer + to foreign key constraints. + + In the first case, not-included columns that refer to foreign keys would + still attempt to create a :class:`.ForeignKey` object, producing errors + when attempting to resolve the columns for the foreign key constraint + within reflection; foreign key constraints that refer to skipped columns + are now omitted from the table reflection process in the same way as + occurs for :class:`.Index` and :class:`.UniqueConstraint` objects with the + same conditions. No warning is produced however, as we likely want to + remove the include_columns warnings for all constraints in 2.0. 
+ + In the latter case, the production of table aliases or subqueries would + fail on an FK related table not found despite the presence of + ``resolve_fks=False``; the logic has been repaired so that if a related + table is not found, the :class:`.ForeignKey` object is still proxied to the + aliased table or subquery (these :class:`.ForeignKey` objects are normally + used in the production of join conditions), but it is sent with a flag that + it's not resolvable. The aliased table / subquery will then work normally, + with the exception that it cannot be used to generate a join condition + automatically, as the foreign key information is missing. This was already + the behavior for such foreign key constraints produced using non-reflection + methods, such as joining :class:`.Table` objects from different + :class:`.MetaData` collections. diff --git a/lib/sqlalchemy/engine/reflection.py b/lib/sqlalchemy/engine/reflection.py index ad50a3e3160..b475228c82d 100644 --- a/lib/sqlalchemy/engine/reflection.py +++ b/lib/sqlalchemy/engine/reflection.py @@ -797,6 +797,7 @@ def reflect_table( schema, table, cols_by_orig_name, + include_columns, exclude_columns, resolve_fks, _extend_on, @@ -940,6 +941,7 @@ def _reflect_fk( schema, table, cols_by_orig_name, + include_columns, exclude_columns, resolve_fks, _extend_on, @@ -956,10 +958,17 @@ def _reflect_fk( cols_by_orig_name[c].key if c in cols_by_orig_name else c for c in fkey_d["constrained_columns"] ] - if exclude_columns and set(constrained_columns).intersection( + + if ( exclude_columns + and set(constrained_columns).intersection(exclude_columns) + or ( + include_columns + and set(constrained_columns).difference(include_columns) + ) ): continue + referred_schema = fkey_d["referred_schema"] referred_table = fkey_d["referred_table"] referred_columns = fkey_d["referred_columns"] @@ -994,6 +1003,7 @@ def _reflect_fk( options = fkey_d["options"] else: options = {} + table.append_constraint( sa_schema.ForeignKeyConstraint( 
constrained_columns, diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py index 322f630c7dd..bc7e65d90c8 100644 --- a/lib/sqlalchemy/sql/schema.py +++ b/lib/sqlalchemy/sql/schema.py @@ -2049,10 +2049,19 @@ def _make_proxy( information is not transferred. """ + fk = [ - ForeignKey(f.column, _constraint=f.constraint) - for f in self.foreign_keys + ForeignKey( + col if col is not None else f._colspec, + _unresolvable=col is None, + _constraint=f.constraint, + ) + for f, col in [ + (fk, fk._resolve_column(raiseerr=False)) + for fk in self.foreign_keys + ] ] + if name is None and self.name is None: raise exc.InvalidRequestError( "Cannot initialize a sub-selectable" @@ -2152,6 +2161,7 @@ def __init__( link_to_name=False, match=None, info=None, + _unresolvable=False, **dialect_kw ): r""" @@ -2225,6 +2235,7 @@ def __init__( """ self._colspec = coercions.expect(roles.DDLReferredColumnRole, column) + self._unresolvable = _unresolvable if isinstance(self._colspec, util.string_types): self._table_column = None @@ -2411,6 +2422,11 @@ def _resolve_col_tokens(self): parenttable = self.parent.table + if self._unresolvable: + schema, tname, colname = self._column_tokens + tablekey = _get_table_key(tname, schema) + return parenttable, tablekey, colname + # assertion # basically Column._make_proxy() sends the actual # target Column to the ForeignKey object, so the @@ -2499,11 +2515,17 @@ def column(self): """ + return self._resolve_column() + + def _resolve_column(self, raiseerr=True): + if isinstance(self._colspec, util.string_types): parenttable, tablekey, colname = self._resolve_col_tokens() - if tablekey not in parenttable.metadata: + if self._unresolvable or tablekey not in parenttable.metadata: + if not raiseerr: + return None raise exc.NoReferencedTableError( "Foreign key associated with column '%s' could not find " "table '%s' with which to generate a " @@ -2512,6 +2534,8 @@ def column(self): tablekey, ) elif parenttable.key not in parenttable.metadata: + 
if not raiseerr: + return None raise exc.InvalidRequestError( "Table %s is no longer associated with its " "parent MetaData" % parenttable diff --git a/lib/sqlalchemy/sql/util.py b/lib/sqlalchemy/sql/util.py index 7f3ef744c9c..019b29e3d1e 100644 --- a/lib/sqlalchemy/sql/util.py +++ b/lib/sqlalchemy/sql/util.py @@ -151,7 +151,7 @@ def find_left_clause_to_join_from(clauses, join_to, onclause): if set(f.c).union(s.c).issuperset(cols_in_onclause): idx.append(i) break - elif Join._can_join(f, s) or onclause is not None: + elif onclause is not None or Join._can_join(f, s): idx.append(i) break diff --git a/test/engine/test_reflection.py b/test/engine/test_reflection.py index 0a46ddeecaa..64a3bc4d329 100644 --- a/test/engine/test_reflection.py +++ b/test/engine/test_reflection.py @@ -12,6 +12,7 @@ from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import schema +from sqlalchemy import select from sqlalchemy import sql from sqlalchemy import String from sqlalchemy import testing @@ -23,6 +24,7 @@ from sqlalchemy.testing import config from sqlalchemy.testing import eq_ from sqlalchemy.testing import eq_regex +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import expect_warnings from sqlalchemy.testing import fixtures from sqlalchemy.testing import in_ @@ -253,41 +255,6 @@ def test_nonexistent(self, connection): ) assert "nonexistent" not in meta.tables - def test_include_columns(self, connection, metadata): - meta = metadata - foo = Table( - "foo", - meta, - *[Column(n, sa.String(30)) for n in ["a", "b", "c", "d", "e", "f"]] - ) - meta.create_all(connection) - meta2 = MetaData() - foo = Table( - "foo", - meta2, - autoload_with=connection, - include_columns=["b", "f", "e"], - ) - # test that cols come back in original order - eq_([c.name for c in foo.c], ["b", "e", "f"]) - for c in ("b", "f", "e"): - assert c in foo.c - for c in ("a", "c", "d"): - assert c not in foo.c - - # test against a table which is already 
reflected - meta3 = MetaData() - foo = Table("foo", meta3, autoload_with=connection) - - foo = Table( - "foo", meta3, include_columns=["b", "f", "e"], extend_existing=True - ) - eq_([c.name for c in foo.c], ["b", "e", "f"]) - for c in ("b", "f", "e"): - assert c in foo.c - for c in ("a", "c", "d"): - assert c not in foo.c - def test_extend_existing(self, connection, metadata): meta = metadata @@ -2236,3 +2203,156 @@ def test_table_reflection(self): is_true(table.c.id1.identity is not None) eq_(table.c.id1.identity.start, 2) eq_(table.c.id1.identity.increment, 3) + + +class IncludeColsFksTest(AssertsCompiledSQL, fixtures.TestBase): + __dialect__ = "default" + + @testing.fixture + def tab_wo_fks(self, connection, metadata): + meta = metadata + foo = Table( + "foo", + meta, + *[Column(n, sa.String(30)) for n in ["a", "b", "c", "d", "e", "f"]] + ) + meta.create_all(connection) + + return foo + + @testing.fixture + def tab_w_fks(self, connection, metadata): + Table( + "a", + metadata, + Column("x", Integer, primary_key=True), + test_needs_fk=True, + ) + + b = Table( + "b", + metadata, + Column("x", Integer, primary_key=True), + Column("q", Integer), + Column("p", Integer), + Column("r", Integer, ForeignKey("a.x")), + Column("s", Integer), + Column("t", Integer), + test_needs_fk=True, + ) + + metadata.create_all(connection) + + return b + + def test_include_columns(self, connection, tab_wo_fks): + foo = tab_wo_fks + meta2 = MetaData() + foo = Table( + "foo", + meta2, + autoload_with=connection, + include_columns=["b", "f", "e"], + ) + # test that cols come back in original order + eq_([c.name for c in foo.c], ["b", "e", "f"]) + for c in ("b", "f", "e"): + assert c in foo.c + for c in ("a", "c", "d"): + assert c not in foo.c + + # test against a table which is already reflected + meta3 = MetaData() + foo = Table("foo", meta3, autoload_with=connection) + + foo = Table( + "foo", meta3, include_columns=["b", "f", "e"], extend_existing=True + ) + eq_([c.name for c in foo.c], 
["b", "e", "f"]) + for c in ("b", "f", "e"): + assert c in foo.c + for c in ("a", "c", "d"): + assert c not in foo.c + + @testing.emits_warning + @testing.combinations(True, False, argnames="resolve_fks") + def test_include_cols_skip_fk_col( + self, connection, tab_w_fks, resolve_fks + ): + """test #8100""" + + m2 = MetaData() + + b2 = Table( + "b", + m2, + autoload_with=connection, + resolve_fks=resolve_fks, + include_columns=["x", "q", "p"], + ) + + eq_([c.name for c in b2.c], ["x", "q", "p"]) + + # no FK, whether or not resolve_fks was called + eq_(b2.constraints, set((b2.primary_key,))) + + b2a = b2.alias() + eq_([c.name for c in b2a.c], ["x", "q", "p"]) + + self.assert_compile(select(b2), "SELECT b.x, b.q, b.p FROM b") + self.assert_compile( + select(b2.alias()), + "SELECT b_1.x, b_1.q, b_1.p FROM b AS b_1", + ) + + def test_table_works_minus_fks(self, connection, tab_w_fks): + """test #8101""" + + m2 = MetaData() + + b2 = Table( + "b", + m2, + autoload_with=connection, + resolve_fks=False, + ) + + eq_([c.name for c in b2.c], ["x", "q", "p", "r", "s", "t"]) + + b2a = b2.alias() + eq_([c.name for c in b2a.c], ["x", "q", "p", "r", "s", "t"]) + + self.assert_compile( + select(b2), "SELECT b.x, b.q, b.p, b.r, b.s, b.t FROM b" + ) + b2a_1 = b2.alias() + self.assert_compile( + select(b2a_1), + "SELECT b_1.x, b_1.q, b_1.p, b_1.r, b_1.s, b_1.t FROM b AS b_1", + ) + + # reflecting the related table + a2 = Table("a", m2, autoload_with=connection) + + # the existing alias doesn't know about it + with expect_raises_message( + sa.exc.InvalidRequestError, + "Foreign key associated with column 'anon_1.r' could not find " + "table 'a' with which to generate a foreign key to target " + "column 'x'", + ): + select(b2a_1).join(a2).compile() + + # can still join manually (needed to fix inside of util for this...) 
+ self.assert_compile( + select(b2a_1).join(a2, b2a_1.c.r == a2.c.x), + "SELECT b_1.x, b_1.q, b_1.p, b_1.r, b_1.s, b_1.t " + "FROM b AS b_1 JOIN a ON b_1.r = a.x", + ) + + # a new alias does know about it however + self.assert_compile( + select(b2.alias()).join(a2), + "SELECT b_1.x, b_1.q, b_1.p, b_1.r, b_1.s, b_1.t " + "FROM b AS b_1 JOIN a ON a.x = b_1.r", + ) diff --git a/test/sql/test_selectable.py b/test/sql/test_selectable.py index eb577aa0235..e0113a7f101 100644 --- a/test/sql/test_selectable.py +++ b/test/sql/test_selectable.py @@ -1407,21 +1407,67 @@ def test_table_joined_to_select_of_table(self): assert j4.corresponding_column(j2.c.aid) is j4.c.aid assert j4.corresponding_column(a.c.id) is j4.c.id - def test_two_metadata_join_raises(self): + @testing.combinations(True, False) + def test_two_metadata_join_raises(self, include_a_joining_table): + """test case from 2008 enhanced as of #8101, more specific failure + modes for non-resolvable FKs + + """ m = MetaData() m2 = MetaData() t1 = Table("t1", m, Column("id", Integer), Column("id2", Integer)) - t2 = Table("t2", m, Column("id", Integer, ForeignKey("t1.id"))) + + if include_a_joining_table: + t2 = Table("t2", m, Column("id", Integer, ForeignKey("t1.id"))) + t3 = Table("t3", m2, Column("id", Integer, ForeignKey("t1.id2"))) - s = ( - select(t2, t3) - .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) - .subquery() - ) + with expect_raises_message( + exc.NoReferencedTableError, + "Foreign key associated with column 't3.id'", + ): + t3.join(t1) - assert_raises(exc.NoReferencedTableError, s.join, t1) + if include_a_joining_table: + s = ( + select(t2, t3) + .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) + .subquery() + ) + else: + s = ( + select(t3) + .set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) + .subquery() + ) + + with expect_raises_message( + exc.NoReferencedTableError, + "Foreign key associated with column 'anon_1.t3_id' could not " + "find table 't1' with which to generate a foreign key to target " + 
"column 'id2'", + ): + select(s.join(t1)), + + # manual join is OK. using select().join() here is also exercising + # that join() does not need to resolve FKs if we provided the + # ON clause + if include_a_joining_table: + self.assert_compile( + select(s).join( + t1, and_(s.c.t2_id == t1.c.id, s.c.t3_id == t1.c.id) + ), + "SELECT anon_1.t2_id, anon_1.t3_id FROM (SELECT " + "t2.id AS t2_id, t3.id AS t3_id FROM t2, t3) AS anon_1 " + "JOIN t1 ON anon_1.t2_id = t1.id AND anon_1.t3_id = t1.id", + ) + else: + self.assert_compile( + select(s).join(t1, s.c.t3_id == t1.c.id), + "SELECT anon_1.t3_id FROM (SELECT t3.id AS t3_id FROM t3) " + "AS anon_1 JOIN t1 ON anon_1.t3_id = t1.id", + ) def test_multi_label_chain_naming_col(self): # See [ticket:2167] for this one. From 3f73cc2291906d3c4102173f6020529b705f55ee Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 8 Jun 2022 11:56:57 -0400 Subject: [PATCH 265/632] add note re: pickling for loader criteria Change-Id: I75992af71ba08799a03995178a6e4612c9a7428a References: #8108 --- lib/sqlalchemy/orm/util.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index a5c571ad75d..1c9c9c796f7 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -1092,12 +1092,19 @@ class of a particular set of mapped classes, to which the rule accepts a target class as an argument, when the given class is a base with many different mapped subclasses. + .. note:: when the SQL expression is a lambda, **pickling is not + supported**. Set + :paramref:`_orm.with_loader_criteria.propagate_to_loaders` + to ``False`` to prevent the object from being applied to instances. + :param include_aliases: if True, apply the rule to :func:`_orm.aliased` constructs as well. :param propagate_to_loaders: defaults to True, apply to relationship - loaders such as lazy loaders. - + loaders such as lazy loaders. 
This indicates that the + option object itself including SQL expression is carried along with + each loaded instance. Set to ``False`` to prevent the object from + being assigned to individual instances. .. seealso:: From f83d95085332454b700a46ab2540036dfa67363c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 8 Jun 2022 16:03:26 -0400 Subject: [PATCH 266/632] document thread safety workaround for lambda statements Change-Id: Idb7840ff64487ef985087a28bb6e96088e6a392e References: #8098 (cherry picked from commit bf40bade26e32fc3757bbd756f4c9ebdc5d72090) --- doc/build/core/connections.rst | 49 ++++++++++++++++++++++++---------- 1 file changed, 35 insertions(+), 14 deletions(-) diff --git a/doc/build/core/connections.rst b/doc/build/core/connections.rst index c683c7ee9df..037ac3b580e 100644 --- a/doc/build/core/connections.rst +++ b/doc/build/core/connections.rst @@ -1442,11 +1442,13 @@ SELECTs with LIMIT/OFFSET are correctly rendered and cached. Using Lambdas to add significant speed gains to statement production -------------------------------------------------------------------- -.. deepalchemy:: This technique is generally non-essential except in very performance - intensive scenarios, and intended for experienced Python programmers. - While fairly straightforward, it involves metaprogramming concepts that are - not appropriate for novice Python developers. The lambda approach can be - applied to at a later time to existing code with a minimal amount of effort. +.. deprecated:: 1.4 The lambda statement feature is being considered + for deprecation in SQLAlchemy and removal from documentation for 2.0. The + internal workings have been shown to be not thread safe during construction + for multi-lambda statements, and the overall complexity of the feature has + proven to be mostly impractical outside of a particular narrow use case + in the ORM. 
+ Python functions, typically expressed as lambdas, may be used to generate SQL expressions which are cacheable based on the Python code location of @@ -1480,11 +1482,15 @@ to also having closure variables, which are significant to the whole approach:: from sqlalchemy import lambda_stmt + import threading + + mutex = threading.Lock() def run_my_statement(connection, parameter): - stmt = lambda_stmt(lambda: select(table)) - stmt += lambda s: s.where(table.c.col == parameter) - stmt += lambda s: s.order_by(table.c.id) + with mutex: + stmt = lambda_stmt(lambda: select(table)) + stmt += lambda s: s.where(table.c.col == parameter) + stmt += lambda s: s.order_by(table.c.id) return connection.execute(stmt) @@ -1515,15 +1521,26 @@ objects will run and analyze the given lambda in order to calculate how it should be cached on each run, trying to detect any potential problems. Basic guidelines include: +* **For multi-threaded applications, a mutex is required when building up + statements among multiple lambdas** - + this is a discovered limitation in the implementation; while SQLAlchemy + developers hope to repair it, code examples are illustrating this + usage for now until it can be fixed. 
+ * **Any kind of statement is supported** - while it's expected that :func:`_sql.select` constructs are the prime use case for :func:`_sql.lambda_stmt`, DML statements such as :func:`_sql.insert` and :func:`_sql.update` are equally usable:: + import threading + + mutex = threading.Lock() + def upd(id_, newname): - stmt = lambda_stmt(lambda: users.update()) - stmt += lambda s: s.values(name=newname) - stmt += lambda s: s.where(users.c.id==id_) + with mutex: + stmt = lambda_stmt(lambda: users.update()) + stmt += lambda s: s.values(name=newname) + stmt += lambda s: s.where(users.c.id==id_) return stmt with engine.begin() as conn: @@ -1535,15 +1552,19 @@ Basic guidelines include: can accommodate ORM functionality completely and used directly with :meth:`_orm.Session.execute`:: + import threading + + mutex = threading.Lock() + def select_user(session, name): - stmt = lambda_stmt(lambda: select(User)) - stmt += lambda s: s.where(User.name == name) + with mutex: + stmt = lambda_stmt(lambda: select(User)) + stmt += lambda s: s.where(User.name == name) row = session.execute(stmt).first() return row .. - * **Bound parameters are automatically accommodated** - in contrast to SQLAlchemy's previous "baked query" system, the lambda SQL system accommodates for Python literal values which become SQL bound parameters automatically. From 53ae884a0c5d107d1a4d9b4b64192c3b3c507ca8 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 7 Jun 2022 15:00:20 -0400 Subject: [PATCH 267/632] fix race conditions in lambda statements Fixed multiple observed race conditions related to :func:`.lambda_stmt`, including an initial "dogpile" issue when a new Python code object is initially analyzed among multiple simultaneous threads which created both a performance issue as well as some internal corruption of state. 
Additionally repaired observed race condition which could occur when "cloning" an expression construct that is also in the process of being compiled or otherwise accessed in a different thread due to memoized attributes altering the ``__dict__`` while iterated, for Python versions prior to 3.10; in particular the lambda SQL construct is sensitive to this as it holds onto a single statement object persistently. The iteration has been refined to use ``dict.copy()`` with or without an additional iteration instead. Fixes: #8098 Change-Id: I4e0b627bfa187f1780dc68ec81b94db1c78f846a (cherry picked from commit 117878f7870377f143917a22160320a891eb0211) --- doc/build/changelog/unreleased_14/8098.rst | 16 +++++++++++ lib/sqlalchemy/sql/base.py | 3 +- lib/sqlalchemy/sql/elements.py | 9 +++++- lib/sqlalchemy/sql/lambdas.py | 33 ++++++++++++++++------ 4 files changed, 50 insertions(+), 11 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8098.rst diff --git a/doc/build/changelog/unreleased_14/8098.rst b/doc/build/changelog/unreleased_14/8098.rst new file mode 100644 index 00000000000..0267817abd6 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8098.rst @@ -0,0 +1,16 @@ +.. change:: + :tags: bug, sql + :tickets: 8098 + + Fixed multiple observed race conditions related to :func:`.lambda_stmt`, + including an initial "dogpile" issue when a new Python code object is + initially analyzed among multiple simultaneous threads which created both a + performance issue as well as some internal corruption of state. + Additionally repaired observed race condition which could occur when + "cloning" an expression construct that is also in the process of being + compiled or otherwise accessed in a different thread due to memoized + attributes altering the ``__dict__`` while iterated, for Python versions + prior to 3.10; in particular the lambda SQL construct is sensitive to this + as it holds onto a single statement object persistently. 
The iteration has + been refined to use ``dict.copy()`` with or without an additional iteration + instead. diff --git a/lib/sqlalchemy/sql/base.py b/lib/sqlalchemy/sql/base.py index 52339e35a73..ec685d1fac1 100644 --- a/lib/sqlalchemy/sql/base.py +++ b/lib/sqlalchemy/sql/base.py @@ -559,8 +559,9 @@ def _generate(self): cls = self.__class__ s = cls.__new__(cls) if skip: + # ensure this iteration remains atomic s.__dict__ = { - k: v for k, v in self.__dict__.items() if k not in skip + k: v for k, v in self.__dict__.copy().items() if k not in skip } else: s.__dict__ = self.__dict__.copy() diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index 42ec3e0e7d2..a1891f19cab 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -241,7 +241,14 @@ def _clone(self, **kw): """ skip = self._memoized_keys c = self.__class__.__new__(self.__class__) - c.__dict__ = {k: v for k, v in self.__dict__.items() if k not in skip} + + if skip: + # ensure this iteration remains atomic + c.__dict__ = { + k: v for k, v in self.__dict__.copy().items() if k not in skip + } + else: + c.__dict__ = self.__dict__.copy() # this is a marker that helps to "equate" clauses to each other # when a Select returns its list of FROM clauses. 
the cloning diff --git a/lib/sqlalchemy/sql/lambdas.py b/lib/sqlalchemy/sql/lambdas.py index 5f91559987d..584efe4c688 100644 --- a/lib/sqlalchemy/sql/lambdas.py +++ b/lib/sqlalchemy/sql/lambdas.py @@ -9,6 +9,7 @@ import itertools import operator import sys +import threading import types import weakref @@ -218,11 +219,17 @@ def _retrieve_tracker_rec(self, fn, apply_propagate_attrs, opts): if rec is None: if cache_key is not traversals.NO_CACHE: - rec = AnalyzedFunction( - tracker, self, apply_propagate_attrs, fn - ) - rec.closure_bindparams = bindparams - lambda_cache[tracker_key + cache_key] = rec + + with AnalyzedCode._generation_mutex: + key = tracker_key + cache_key + if key not in lambda_cache: + rec = AnalyzedFunction( + tracker, self, apply_propagate_attrs, fn + ) + rec.closure_bindparams = bindparams + lambda_cache[key] = rec + else: + rec = lambda_cache[key] else: rec = NonAnalyzedFunction(self._invoke_user_fn(fn)) @@ -607,6 +614,8 @@ class AnalyzedCode(object): ) _fns = weakref.WeakKeyDictionary() + _generation_mutex = threading.RLock() + @classmethod def get(cls, fn, lambda_element, lambda_kw, **kw): try: @@ -614,10 +623,16 @@ def get(cls, fn, lambda_element, lambda_kw, **kw): return cls._fns[fn.__code__] except KeyError: pass - cls._fns[fn.__code__] = analyzed = AnalyzedCode( - fn, lambda_element, lambda_kw, **kw - ) - return analyzed + + with cls._generation_mutex: + # check for other thread already created object + if fn.__code__ in cls._fns: + return cls._fns[fn.__code__] + + cls._fns[fn.__code__] = analyzed = AnalyzedCode( + fn, lambda_element, lambda_kw, **kw + ) + return analyzed def __init__(self, fn, lambda_element, opts): if inspect.ismethod(fn): From 70fbce17cda3fd9ea36febae6b93c3c5877f367f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 8 Jun 2022 13:05:20 -0400 Subject: [PATCH 268/632] suppport with_loader_criteria pickling w/ fixed callable Fixed issue where a :func:`_orm.with_loader_criteria` option could not be pickled, as is 
necessary when it is carried along for propagation to lazy loaders in conjunction with a caching scheme. Currently, the only form that is supported as picklable is to pass the "where criteria" as a fixed module-level callable function that produces a SQL expression. An ad-hoc "lambda" can't be pickled, and a SQL expression object is usually not fully picklable directly. Fixes: #8109 Change-Id: I49fe69088b0c7e58a0f22c67d2ea4e33752a5a73 (cherry picked from commit 293b0e3dd8205185b84cd3baf2f078348437d245) --- doc/build/changelog/unreleased_14/8109.rst | 12 ++++++ lib/sqlalchemy/orm/util.py | 33 ++++++++++++++-- test/orm/test_pickled.py | 45 ++++++++++++++++++++++ 3 files changed, 86 insertions(+), 4 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8109.rst diff --git a/doc/build/changelog/unreleased_14/8109.rst b/doc/build/changelog/unreleased_14/8109.rst new file mode 100644 index 00000000000..cf64d21ac4a --- /dev/null +++ b/doc/build/changelog/unreleased_14/8109.rst @@ -0,0 +1,12 @@ +.. change:: + :tags: bug, orm + :tickets: 8109 + + Fixed issue where a :func:`_orm.with_loader_criteria` option could not be + pickled, as is necessary when it is carried along for propagation to lazy + loaders in conjunction with a caching scheme. Currently, the only form that + is supported as picklable is to pass the "where criteria" as a fixed + module-level callable function that produces a SQL expression. An ad-hoc + "lambda" can't be pickled, and a SQL expression object is usually not fully + picklable directly. 
+ diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index 1c9c9c796f7..66f42ba810f 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -907,6 +907,8 @@ class _WrapUserEntity(object): """ + __slots__ = ("subject",) + def __init__(self, subject): self.subject = subject @@ -1092,10 +1094,9 @@ class of a particular set of mapped classes, to which the rule accepts a target class as an argument, when the given class is a base with many different mapped subclasses. - .. note:: when the SQL expression is a lambda, **pickling is not - supported**. Set - :paramref:`_orm.with_loader_criteria.propagate_to_loaders` - to ``False`` to prevent the object from being applied to instances. + .. note:: To support pickling, use a module-level Python function to + produce the SQL expression instead of a lambda or a fixed SQL + expression, which tend to not be picklable. :param include_aliases: if True, apply the rule to :func:`_orm.aliased` constructs as well. @@ -1132,6 +1133,7 @@ class of a particular set of mapped classes, to which the rule self.root_entity = None self.entity = entity + self._where_crit_orig = where_criteria if callable(where_criteria): self.deferred_where_criteria = True self.where_criteria = lambdas.DeferredLambdaElement( @@ -1157,7 +1159,30 @@ class of a particular set of mapped classes, to which the rule self.include_aliases = include_aliases self.propagate_to_loaders = propagate_to_loaders + @classmethod + def _unreduce( + cls, entity, where_criteria, include_aliases, propagate_to_loaders + ): + return LoaderCriteriaOption( + entity, + where_criteria, + include_aliases=include_aliases, + propagate_to_loaders=propagate_to_loaders, + ) + + def __reduce__(self): + return ( + LoaderCriteriaOption._unreduce, + ( + self.entity.class_ if self.entity else None, + self._where_crit_orig, + self.include_aliases, + self.propagate_to_loaders, + ), + ) + def _all_mappers(self): + if self.entity: for ent in 
self.entity.mapper.self_and_descendants: yield ent diff --git a/test/orm/test_pickled.py b/test/orm/test_pickled.py index 11d90bd5907..c1be0ca25c8 100644 --- a/test/orm/test_pickled.py +++ b/test/orm/test_pickled.py @@ -15,6 +15,7 @@ from sqlalchemy.orm import relationship from sqlalchemy.orm import state as sa_state from sqlalchemy.orm import subqueryload +from sqlalchemy.orm import with_loader_criteria from sqlalchemy.orm import with_polymorphic from sqlalchemy.orm.collections import attribute_mapped_collection from sqlalchemy.orm.collections import column_mapped_collection @@ -43,6 +44,10 @@ from .inheritance._poly_fixtures import Person +def no_ed_foo(cls): + return cls.email_address != "ed@foo.com" + + class PickleTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): @@ -324,6 +329,46 @@ def test_invalidated_flag_deepcopy(self): u2.addresses.append(Address()) eq_(len(u2.addresses), 2) + @testing.requires.python3 + @testing.combinations(True, False, argnames="pickle_it") + def test_loader_criteria(self, pickle_it): + """test #8109""" + + users, addresses = (self.tables.users, self.tables.addresses) + + self.mapper_registry.map_imperatively( + User, + users, + properties={"addresses": relationship(Address)}, + ) + self.mapper_registry.map_imperatively(Address, addresses) + + with fixture_session(expire_on_commit=False) as sess: + u1 = User(name="ed") + u1.addresses = [ + Address(email_address="ed@bar.com"), + Address(email_address="ed@foo.com"), + ] + sess.add(u1) + sess.commit() + + with fixture_session(expire_on_commit=False) as sess: + # note that non-lambda is not picklable right now as + # SQL expressions usually can't be pickled. 
+ opt = with_loader_criteria( + Address, + no_ed_foo, + ) + + u1 = sess.query(User).options(opt).first() + + if pickle_it: + u1 = pickle.loads(pickle.dumps(u1)) + sess.close() + sess.add(u1) + + eq_([ad.email_address for ad in u1.addresses], ["ed@bar.com"]) + @testing.requires.non_broken_pickle def test_instance_deferred_cols(self): users, addresses = (self.tables.users, self.tables.addresses) From 5a8eca0bbda78d2d2d180e083bc4aca663b7304c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 8 Jun 2022 19:04:23 -0400 Subject: [PATCH 269/632] add tests to confirm no issue w/ pg json keys Change-Id: Ie91e5efb217c309bc40c3933f538bcf29c1fd87b References: #8112 (cherry picked from commit 409a2173ebe8a9126911051873b3734e6c6be3f4) --- test/dialect/postgresql/test_types.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/test/dialect/postgresql/test_types.py b/test/dialect/postgresql/test_types.py index ad0fcfeeea3..564554f668f 100644 --- a/test/dialect/postgresql/test_types.py +++ b/test/dialect/postgresql/test_types.py @@ -3828,6 +3828,33 @@ def _assert_column_is_JSON_NULL(self, conn, column="data"): ).fetchall() eq_([d for d, in data], [None]) + @testing.combinations( + "key", + "réve🐍 illé", + 'name_with"quotes"name', + "name with spaces", + "name with ' single ' quotes", + 'some_key("idx")', + argnames="key", + ) + def test_indexed_special_keys(self, connection, key): + data_table = self.tables.data_table + data_element = {key: "some value"} + + connection.execute( + data_table.insert(), + { + "name": "row1", + "data": data_element, + "nulldata": data_element, + }, + ) + + row = connection.execute( + select(data_table.c.data[key], data_table.c.nulldata[key]) + ).one() + eq_(row, ("some value", "some value")) + def test_reflect(self, connection): insp = inspect(connection) cols = insp.get_columns("data_table") From 4f59ecef6f500e7e2586ee9c5d992a33e0d5d043 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 9 Jun 2022 08:55:14 -0400 
Subject: [PATCH 270/632] handle non-mapped mixins for with_loader_criteria reduce special handling is needed for a with_loader_criteria() against a non-mapped mixin class. added that to test coverage Fixes: #8109 Change-Id: Ia599361c8faab008e92095eb4607d02820f590d5 (cherry picked from commit 6f93f88b5ee683141c81ecd434a4c0818e08dbd9) --- lib/sqlalchemy/orm/util.py | 2 +- lib/sqlalchemy/testing/pickleable.py | 10 ++++++++++ test/orm/test_pickled.py | 19 +++++++++++++------ 3 files changed, 24 insertions(+), 7 deletions(-) diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index 66f42ba810f..f95af41d24a 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -1174,7 +1174,7 @@ def __reduce__(self): return ( LoaderCriteriaOption._unreduce, ( - self.entity.class_ if self.entity else None, + self.entity.class_ if self.entity else self.root_entity, self._where_crit_orig, self.include_aliases, self.propagate_to_loaders, diff --git a/lib/sqlalchemy/testing/pickleable.py b/lib/sqlalchemy/testing/pickleable.py index 04405e53974..f05960c839a 100644 --- a/lib/sqlalchemy/testing/pickleable.py +++ b/lib/sqlalchemy/testing/pickleable.py @@ -10,6 +10,8 @@ """ from . 
import fixtures +from ..schema import Column +from ..types import String class User(fixtures.ComparableEntity): @@ -51,6 +53,14 @@ def __init__(self, obj, parent=None): self.parent = parent +class Mixin(object): + email_address = Column(String) + + +class AddressWMixin(Mixin, fixtures.ComparableEntity): + pass + + class Foo(object): def __init__(self, moredata, stuff="im stuff"): self.data = "im data" diff --git a/test/orm/test_pickled.py b/test/orm/test_pickled.py index c1be0ca25c8..fe7ac7b7028 100644 --- a/test/orm/test_pickled.py +++ b/test/orm/test_pickled.py @@ -24,10 +24,12 @@ from sqlalchemy.testing import fixtures from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.pickleable import Address +from sqlalchemy.testing.pickleable import AddressWMixin from sqlalchemy.testing.pickleable import Child1 from sqlalchemy.testing.pickleable import Child2 from sqlalchemy.testing.pickleable import Dingaling from sqlalchemy.testing.pickleable import EmailUser +from sqlalchemy.testing.pickleable import Mixin from sqlalchemy.testing.pickleable import Order from sqlalchemy.testing.pickleable import Parent from sqlalchemy.testing.pickleable import Screen @@ -331,23 +333,27 @@ def test_invalidated_flag_deepcopy(self): @testing.requires.python3 @testing.combinations(True, False, argnames="pickle_it") - def test_loader_criteria(self, pickle_it): + @testing.combinations(True, False, argnames="use_mixin") + def test_loader_criteria(self, pickle_it, use_mixin): """test #8109""" users, addresses = (self.tables.users, self.tables.addresses) + AddressCls = AddressWMixin if use_mixin else Address + self.mapper_registry.map_imperatively( User, users, - properties={"addresses": relationship(Address)}, + properties={"addresses": relationship(AddressCls)}, ) - self.mapper_registry.map_imperatively(Address, addresses) + + self.mapper_registry.map_imperatively(AddressCls, addresses) with fixture_session(expire_on_commit=False) as sess: u1 = User(name="ed") 
u1.addresses = [ - Address(email_address="ed@bar.com"), - Address(email_address="ed@foo.com"), + AddressCls(email_address="ed@bar.com"), + AddressCls(email_address="ed@foo.com"), ] sess.add(u1) sess.commit() @@ -356,8 +362,9 @@ def test_loader_criteria(self, pickle_it): # note that non-lambda is not picklable right now as # SQL expressions usually can't be pickled. opt = with_loader_criteria( - Address, + Mixin if use_mixin else Address, no_ed_foo, + include_aliases=True, ) u1 = sess.query(User).options(opt).first() From ed7eb6f7f19bd26e984bc3be065243f8ead38b25 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 9 Jun 2022 09:53:43 -0400 Subject: [PATCH 271/632] dont transfer __weakref__ to regenerated class Repaired a deprecation warning class decorator that was preventing key objects such as :class:`_engine.Connection` from having a proper ``__weakref__`` attribute, causing operations like Python standard library ``inspect.getmembers()`` to fail. Fixes: #8115 Change-Id: Ifd0bc2325fb9dc9e1431998c308b7fc081968373 (cherry picked from commit cc7cc3c9ec73055703acc78c8d92eb0242e5cd20) --- doc/build/changelog/unreleased_14/8115.rst | 9 +++++ lib/sqlalchemy/util/deprecations.py | 1 + test/base/test_warnings.py | 46 ++++++++++++++++++++++ 3 files changed, 56 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/8115.rst diff --git a/doc/build/changelog/unreleased_14/8115.rst b/doc/build/changelog/unreleased_14/8115.rst new file mode 100644 index 00000000000..856a76a42ec --- /dev/null +++ b/doc/build/changelog/unreleased_14/8115.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, engine + :tickets: 8115 + + Repaired a deprecation warning class decorator that was preventing key + objects such as :class:`_engine.Connection` from having a proper + ``__weakref__`` attribute, causing operations like Python standard library + ``inspect.getmembers()`` to fail. 
+ diff --git a/lib/sqlalchemy/util/deprecations.py b/lib/sqlalchemy/util/deprecations.py index fe2f968040a..b61516d85c4 100644 --- a/lib/sqlalchemy/util/deprecations.py +++ b/lib/sqlalchemy/util/deprecations.py @@ -356,6 +356,7 @@ def _decorate_cls_with_warning( clsdict = dict(cls.__dict__) clsdict["__doc__"] = doc clsdict.pop("__dict__", None) + clsdict.pop("__weakref__", None) cls = type(cls.__name__, cls.__bases__, clsdict) if constructor is not None: constructor_fn = clsdict[constructor] diff --git a/test/base/test_warnings.py b/test/base/test_warnings.py index 0cbab7f2824..be34f958b28 100644 --- a/test/base/test_warnings.py +++ b/test/base/test_warnings.py @@ -1,6 +1,9 @@ +from sqlalchemy import testing +from sqlalchemy.exc import SADeprecationWarning from sqlalchemy.testing import eq_ from sqlalchemy.testing import expect_deprecated from sqlalchemy.testing import fixtures +from sqlalchemy.util.deprecations import _decorate_cls_with_warning from sqlalchemy.util.deprecations import warn_deprecated_limited from sqlalchemy.util.langhelpers import _hash_limit_string @@ -34,3 +37,46 @@ def test_warn_deprecated_limited_cap(self): eq_(len(printouts), occurrences) eq_(len(messages), cap) + + +class ClsWarningTest(fixtures.TestBase): + @testing.fixture + def dep_cls_fixture(self): + class Connectable(object): + """a docstring""" + + some_member = "foo" + + Connectable = _decorate_cls_with_warning( + Connectable, + None, + SADeprecationWarning, + "a message", + "2.0", + "another message", + ) + + return Connectable + + def test_dep_inspectable(self, dep_cls_fixture): + """test #8115""" + + import inspect + + class PlainClass(object): + some_member = "bar" + + pc_keys = dict(inspect.getmembers(PlainClass())) + insp_keys = dict(inspect.getmembers(dep_cls_fixture())) + + assert set(insp_keys).intersection( + ( + "__class__", + "__doc__", + "__eq__", + "__dict__", + "__weakref__", + "some_member", + ) + ) + eq_(set(pc_keys), set(insp_keys)) From 
49bd3b0046e8d0e16f4b3a2cbe579b64935d6c34 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 8 Jun 2022 21:35:02 -0400 Subject: [PATCH 272/632] restore parameter escaping for public methods Adjusted the fix made for :ticket:`8056` which adjusted the escaping of bound parameter names with special characters such that the escaped names were translated after the SQL compilation step, which broke a published recipe on the FAQ illustrating how to merge parameter names into the string output of a compiled SQL string. The change restores the escaped names that come from ``compiled.params`` and adds a conditional parameter to :meth:`.SQLCompiler.construct_params` named ``escape_names`` that defaults to ``True``, restoring the old behavior by default. Fixes: #8113 Change-Id: I9cbedb1080bc06d51f287fd2cbf26aaab1c74653 (cherry picked from commit 105cd180856309cf5abf24f59b782a1bcd8210d6) --- doc/build/changelog/unreleased_14/8113.rst | 12 +++++++ lib/sqlalchemy/engine/default.py | 4 ++- lib/sqlalchemy/sql/compiler.py | 37 ++++++++++++++++------ test/sql/test_compiler.py | 33 ++++++++++++++++--- 4 files changed, 71 insertions(+), 15 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8113.rst diff --git a/doc/build/changelog/unreleased_14/8113.rst b/doc/build/changelog/unreleased_14/8113.rst new file mode 100644 index 00000000000..100f9a731f0 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8113.rst @@ -0,0 +1,12 @@ +.. change:: + :tags: bug, sql + :tickets: 8113 + + Adjusted the fix made for :ticket:`8056` which adjusted the escaping of + bound parameter names with special characters such that the escaped names + were translated after the SQL compilation step, which broke a published + recipe on the FAQ illustrating how to merge parameter names into the string + output of a compiled SQL string. 
The change restores the escaped names that + come from ``compiled.params`` and adds a conditional parameter to + :meth:`.SQLCompiler.construct_params` named ``escape_names`` that defaults + to ``True``, restoring the old behavior by default. diff --git a/lib/sqlalchemy/engine/default.py b/lib/sqlalchemy/engine/default.py index cc0844e1c3f..028c4b0713a 100644 --- a/lib/sqlalchemy/engine/default.py +++ b/lib/sqlalchemy/engine/default.py @@ -988,13 +988,15 @@ def _init_compiled( if not parameters: self.compiled_parameters = [ compiled.construct_params( - extracted_parameters=extracted_parameters + extracted_parameters=extracted_parameters, + escape_names=False, ) ] else: self.compiled_parameters = [ compiled.construct_params( m, + escape_names=False, _group_number=grp, extracted_parameters=extracted_parameters, ) diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 2f3033d7058..477c199c175 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -490,7 +490,9 @@ def __str__(self): return self.string or "" - def construct_params(self, params=None, extracted_parameters=None): + def construct_params( + self, params=None, extracted_parameters=None, escape_names=True + ): """Return the bind params for this compiled object. :param params: a dict of string/object pairs whose values will @@ -932,9 +934,12 @@ def construct_params( _group_number=None, _check=True, extracted_parameters=None, + escape_names=True, ): """return a dictionary of bind parameter keys and values""" + has_escaped_names = escape_names and bool(self.escaped_bind_names) + if extracted_parameters: # related the bound parameters collected in the original cache key # to those collected in the incoming cache key. 
They will not have @@ -965,10 +970,16 @@ def construct_params( if params: pd = {} for bindparam, name in self.bind_names.items(): + escaped_name = ( + self.escaped_bind_names.get(name, name) + if has_escaped_names + else name + ) + if bindparam.key in params: - pd[name] = params[bindparam.key] + pd[escaped_name] = params[bindparam.key] elif name in params: - pd[name] = params[name] + pd[escaped_name] = params[name] elif _check and bindparam.required: if _group_number: @@ -993,13 +1004,19 @@ def construct_params( value_param = bindparam if bindparam.callable: - pd[name] = value_param.effective_value + pd[escaped_name] = value_param.effective_value else: - pd[name] = value_param.value + pd[escaped_name] = value_param.value return pd else: pd = {} for bindparam, name in self.bind_names.items(): + escaped_name = ( + self.escaped_bind_names.get(name, name) + if has_escaped_names + else name + ) + if _check and bindparam.required: if _group_number: raise exc.InvalidRequestError( @@ -1021,9 +1038,9 @@ def construct_params( value_param = bindparam if bindparam.callable: - pd[name] = value_param.effective_value + pd[escaped_name] = value_param.effective_value else: - pd[name] = value_param.value + pd[escaped_name] = value_param.value return pd @util.memoized_instancemethod @@ -1123,7 +1140,7 @@ def _process_parameters_for_postcompile( """ if parameters is None: - parameters = self.construct_params() + parameters = self.construct_params(escape_names=False) expanded_parameters = {} if self.positional: @@ -4317,7 +4334,9 @@ def sql_compiler(self): def type_compiler(self): return self.dialect.type_compiler - def construct_params(self, params=None, extracted_parameters=None): + def construct_params( + self, params=None, extracted_parameters=None, escape_names=True + ): return None def visit_ddl(self, ddl, **kwargs): diff --git a/test/sql/test_compiler.py b/test/sql/test_compiler.py index 4db5f3df9d2..5953c6449e4 100644 --- a/test/sql/test_compiler.py +++ 
b/test/sql/test_compiler.py @@ -3662,10 +3662,14 @@ def test_bind_param_escaping(self): """general bind param escape unit tests added as a result of #8053. - However, note that the final application of an escaped param name + The final application of an escaped param name was moved out of compiler and into DefaultExecutionContext in related issue #8056. + However in #8113 we made this conditional to suit usage recipes + posted in the FAQ. + + """ SomeEnum = pep435_enum("SomeEnum") @@ -3698,14 +3702,33 @@ def bindparam_string(self, name, **kw): compiled = t.insert().compile( dialect=dialect, compile_kwargs=dict(compile_keys=("_id", "_data")) ) - params = compiled.construct_params({"_id": 1, "_data": one}) + # not escaped + params = compiled.construct_params( + {"_id": 1, "_data": one}, escape_names=False + ) eq_(params, {"_id": 1, "_data": one}) + + # escaped by default + params = compiled.construct_params({"_id": 1, "_data": one}) + eq_(params, {'"_id"': 1, '"_data"': one}) + + # escaped here as well + eq_(compiled.params, {'"_data"': None, '"_id"': None}) + + # bind processors aren't part of this eq_(compiled._bind_processors, {"_data": mock.ANY}) - # previously, this was: - # eq_(params, {'"_id"': 1, '"_data"': one}) - # eq_(compiled._bind_processors, {'"_data"': mock.ANY}) + dialect.paramstyle = "pyformat" + compiled = t.insert().compile( + dialect=dialect, compile_kwargs=dict(compile_keys=("_id", "_data")) + ) + + # FAQ recipe works + eq_( + compiled.string % compiled.params, + "INSERT INTO t (_id, _data) VALUES (None, None)", + ) def test_expanding_non_expanding_conflict(self): """test #8018""" From 40aeb32efb9a9680be6db3ba4ca41ba2bf5658a8 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 9 Jun 2022 10:57:22 -0400 Subject: [PATCH 273/632] Revert "document thread safety workaround for lambda statements" This reverts commit f83d95085332454b700a46ab2540036dfa67363c. 
--- doc/build/core/connections.rst | 49 ++++++++++------------------------ 1 file changed, 14 insertions(+), 35 deletions(-) diff --git a/doc/build/core/connections.rst b/doc/build/core/connections.rst index 037ac3b580e..c683c7ee9df 100644 --- a/doc/build/core/connections.rst +++ b/doc/build/core/connections.rst @@ -1442,13 +1442,11 @@ SELECTs with LIMIT/OFFSET are correctly rendered and cached. Using Lambdas to add significant speed gains to statement production -------------------------------------------------------------------- -.. deprecated:: 1.4 The lambda statement feature is being considered - for deprecation in SQLAlchemy and removal from documentation for 2.0. The - internal workings have been shown to be not thread safe during construction - for multi-lambda statements, and the overall complexity of the feature has - proven to be mostly impractical outside of a particular narrow use case - in the ORM. - +.. deepalchemy:: This technique is generally non-essential except in very performance + intensive scenarios, and intended for experienced Python programmers. + While fairly straightforward, it involves metaprogramming concepts that are + not appropriate for novice Python developers. The lambda approach can be + applied to at a later time to existing code with a minimal amount of effort. 
Python functions, typically expressed as lambdas, may be used to generate SQL expressions which are cacheable based on the Python code location of @@ -1482,15 +1480,11 @@ to also having closure variables, which are significant to the whole approach:: from sqlalchemy import lambda_stmt - import threading - - mutex = threading.Lock() def run_my_statement(connection, parameter): - with mutex: - stmt = lambda_stmt(lambda: select(table)) - stmt += lambda s: s.where(table.c.col == parameter) - stmt += lambda s: s.order_by(table.c.id) + stmt = lambda_stmt(lambda: select(table)) + stmt += lambda s: s.where(table.c.col == parameter) + stmt += lambda s: s.order_by(table.c.id) return connection.execute(stmt) @@ -1521,26 +1515,15 @@ objects will run and analyze the given lambda in order to calculate how it should be cached on each run, trying to detect any potential problems. Basic guidelines include: -* **For multi-threaded applications, a mutex is required when building up - statements among multiple lambdas** - - this is a discovered limitation in the implementation; while SQLAlchemy - developers hope to repair it, code examples are illustrating this - usage for now until it can be fixed. 
- * **Any kind of statement is supported** - while it's expected that :func:`_sql.select` constructs are the prime use case for :func:`_sql.lambda_stmt`, DML statements such as :func:`_sql.insert` and :func:`_sql.update` are equally usable:: - import threading - - mutex = threading.Lock() - def upd(id_, newname): - with mutex: - stmt = lambda_stmt(lambda: users.update()) - stmt += lambda s: s.values(name=newname) - stmt += lambda s: s.where(users.c.id==id_) + stmt = lambda_stmt(lambda: users.update()) + stmt += lambda s: s.values(name=newname) + stmt += lambda s: s.where(users.c.id==id_) return stmt with engine.begin() as conn: @@ -1552,19 +1535,15 @@ Basic guidelines include: can accommodate ORM functionality completely and used directly with :meth:`_orm.Session.execute`:: - import threading - - mutex = threading.Lock() - def select_user(session, name): - with mutex: - stmt = lambda_stmt(lambda: select(User)) - stmt += lambda s: s.where(User.name == name) + stmt = lambda_stmt(lambda: select(User)) + stmt += lambda s: s.where(User.name == name) row = session.execute(stmt).first() return row .. + * **Bound parameters are automatically accommodated** - in contrast to SQLAlchemy's previous "baked query" system, the lambda SQL system accommodates for Python literal values which become SQL bound parameters automatically. 
From 01b40e9e519fc24edc32f76e085fe4719982181a Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Mon, 13 Jun 2022 22:15:48 +0200 Subject: [PATCH 274/632] try fixing the build Change-Id: Id2e965aa13a6d7134ca1081554cc5b25dbcc9fde (cherry picked from commit 92bd08c3daff74b94652a5edefd5efa7632ccb22) # Conflicts: # test/requirements.py --- test/base/test_tutorials.py | 2 ++ test/requirements.py | 14 ++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/test/base/test_tutorials.py b/test/base/test_tutorials.py index 74011d3d494..bde7baa7505 100644 --- a/test/base/test_tutorials.py +++ b/test/base/test_tutorials.py @@ -9,6 +9,7 @@ from sqlalchemy import testing from sqlalchemy.testing import config from sqlalchemy.testing import fixtures +from sqlalchemy.testing import requires class DocTest(fixtures.TestBase): @@ -88,6 +89,7 @@ def _run_doctest(self, *fnames): globs.update(test.globs) assert not runner.failures + @requires.has_json_each def test_20_style(self): self._run_doctest( "tutorial/index.rst", diff --git a/test/requirements.py b/test/requirements.py index dda8fd6cbb7..e47099013ee 100644 --- a/test/requirements.py +++ b/test/requirements.py @@ -1894,3 +1894,17 @@ def check(config): return res is not None return only_on(["mssql"]) + only_if(check) + + @property + def has_json_each(self): + def go(config): + try: + with config.db.connect() as conn: + conn.exec_driver_sql( + """SELECT x.value FROM json_each('["b", "a"]') as x""" + ) + return True + except exc.DBAPIError: + return False + + return only_if(go, "json_each is required") From 2da2e4daec380bfa52be914b711e73b0f859a6e8 Mon Sep 17 00:00:00 2001 From: Gord Thompson Date: Tue, 14 Jun 2022 07:27:31 -0600 Subject: [PATCH 275/632] Remove reflect=True in Base.prepare examples Change-Id: Icdb17fab0f92762a266efbe1a64bec1d5a6dc9ab (cherry picked from commit be9937d385a74560b65c6ab525f13bc68a5041c1) --- lib/sqlalchemy/ext/automap.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff 
--git a/lib/sqlalchemy/ext/automap.py b/lib/sqlalchemy/ext/automap.py index a586ae1c4ca..9502b09e807 100644 --- a/lib/sqlalchemy/ext/automap.py +++ b/lib/sqlalchemy/ext/automap.py @@ -41,7 +41,7 @@ engine = create_engine("sqlite:///mydatabase.db") # reflect the tables - Base.prepare(engine, reflect=True) + Base.prepare(autoload_with=engine) # mapped classes are now created with names by default # matching that of the table name. @@ -151,7 +151,7 @@ class User(Base): # reflect engine = create_engine("sqlite:///mydatabase.db") - Base.prepare(engine, reflect=True) + Base.prepare(autoload_with=engine) # we still have Address generated from the tablename "address", # but User is the same as Base.classes.User now @@ -215,7 +215,7 @@ def pluralize_collection(base, local_cls, referred_cls, constraint): engine = create_engine("sqlite:///mydatabase.db") - Base.prepare(engine, reflect=True, + Base.prepare(autoload_with=engine, classname_for_table=camelize_classname, name_for_collection_relationship=pluralize_collection ) @@ -333,7 +333,7 @@ def _gen_relationship(base, direction, return_fn, Base = automap_base() engine = create_engine("sqlite:///mydatabase.db") - Base.prepare(engine, reflect=True, + Base.prepare(autoload_with=engine, generate_relationship=_gen_relationship) Many-to-Many relationships @@ -464,7 +464,7 @@ def name_for_scalar_relationship(base, local_cls, referred_cls, constraint): return name - Base.prepare(engine, reflect=True, + Base.prepare(autoload_with=engine, name_for_scalar_relationship=name_for_scalar_relationship) Alternatively, we can change the name on the column side. 
The columns @@ -478,7 +478,7 @@ class TableB(Base): __tablename__ = 'table_b' _table_a = Column('table_a', ForeignKey('table_a.id')) - Base.prepare(engine, reflect=True) + Base.prepare(autoload_with=engine) Using Automap with Explicit Declarations @@ -547,7 +547,7 @@ def column_reflect(inspector, table, column_info): column_info['key'] = "attr_%s" % column_info['name'].lower() # run reflection - Base.prepare(engine, reflect=True) + Base.prepare(autoload_with=engine) .. versionadded:: 1.4.0b2 the :meth:`_events.DDLEvents.column_reflect` event may be applied to a :class:`_schema.MetaData` object. @@ -743,7 +743,7 @@ class that is produced by the :func:`.declarative.declarative_base` are present under the name they were given, e.g.:: Base = automap_base() - Base.prepare(engine=some_engine, reflect=True) + Base.prepare(autoload_with=some_engine) User, Address = Base.classes.User, Base.classes.Address From f4188f571df11905bb4aab107b45298c7130a0ec Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 14 Jun 2022 15:41:31 -0400 Subject: [PATCH 276/632] pickle mutable parents according to key Fixed bug in :class:`.Mutable` where pickling and unpickling of an ORM mapped instance would not correctly restore state for mappings that contained multiple :class:`.Mutable`-enabled attributes. Fixes: #8133 Change-Id: I508763e0df0d7a624e1169f9a46d7f25404add1e (cherry picked from commit 69020e416d9836fcc0bc99fcf008563263fb86f3) --- doc/build/changelog/unreleased_14/8133.rst | 7 +++ lib/sqlalchemy/ext/mutable.py | 7 +-- test/ext/test_mutable.py | 59 ++++++++++++++++++++++ 3 files changed, 70 insertions(+), 3 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8133.rst diff --git a/doc/build/changelog/unreleased_14/8133.rst b/doc/build/changelog/unreleased_14/8133.rst new file mode 100644 index 00000000000..36da8ad8e6c --- /dev/null +++ b/doc/build/changelog/unreleased_14/8133.rst @@ -0,0 +1,7 @@ +.. 
change:: + :tags: bug, ext + :tickets: 8133 + + Fixed bug in :class:`.Mutable` where pickling and unpickling of an ORM + mapped instance would not correctly restore state for mappings that + contained multiple :class:`.Mutable`-enabled attributes. diff --git a/lib/sqlalchemy/ext/mutable.py b/lib/sqlalchemy/ext/mutable.py index b5217a42677..934ac37a056 100644 --- a/lib/sqlalchemy/ext/mutable.py +++ b/lib/sqlalchemy/ext/mutable.py @@ -354,6 +354,7 @@ def __setstate__(self, state): :meth:`MutableBase._parents` collection is restored to all ``Point`` objects. """ +from collections import defaultdict import weakref from .. import event @@ -496,12 +497,12 @@ def pickle(state, state_dict): val = state.dict.get(key, None) if val is not None: if "ext.mutable.values" not in state_dict: - state_dict["ext.mutable.values"] = [] - state_dict["ext.mutable.values"].append(val) + state_dict["ext.mutable.values"] = defaultdict(list) + state_dict["ext.mutable.values"][key].append(val) def unpickle(state, state_dict): if "ext.mutable.values" in state_dict: - for val in state_dict["ext.mutable.values"]: + for val in state_dict["ext.mutable.values"][key]: val._parents[state] = key event.listen(parent_cls, "load", load, raw=True, propagate=True) diff --git a/test/ext/test_mutable.py b/test/ext/test_mutable.py index 1d88deb7a0e..ff167b25365 100644 --- a/test/ext/test_mutable.py +++ b/test/ext/test_mutable.py @@ -4,7 +4,9 @@ from sqlalchemy import event from sqlalchemy import ForeignKey from sqlalchemy import func +from sqlalchemy import inspect from sqlalchemy import Integer +from sqlalchemy import select from sqlalchemy import String from sqlalchemy import testing from sqlalchemy import util @@ -16,6 +18,7 @@ from sqlalchemy.orm import column_property from sqlalchemy.orm import composite from sqlalchemy.orm import declarative_base +from sqlalchemy.orm import Session from sqlalchemy.orm.instrumentation import ClassManager from sqlalchemy.orm.mapper import Mapper from sqlalchemy.testing 
import assert_raises @@ -41,6 +44,10 @@ class SubFoo(Foo): pass +class Foo2(fixtures.BasicEntity): + pass + + class FooWithEq(object): def __init__(self, **kw): for k in kw: @@ -102,6 +109,58 @@ def teardown_test(self): ClassManager.dispatch._clear() +class MiscTest(fixtures.TestBase): + @testing.combinations(True, False, argnames="pickleit") + def test_pickle_parent_multi_attrs(self, registry, connection, pickleit): + """test #8133""" + + local_foo = Table( + "lf", + registry.metadata, + Column("id", Integer, primary_key=True), + Column("j1", MutableDict.as_mutable(PickleType)), + Column("j2", MutableDict.as_mutable(PickleType)), + Column("j3", MutableDict.as_mutable(PickleType)), + Column("j4", MutableDict.as_mutable(PickleType)), + ) + + registry.map_imperatively(Foo2, local_foo) + registry.metadata.create_all(connection) + + with Session(connection) as sess: + + data = dict( + j1={"a": 1}, + j2={"b": 2}, + j3={"c": 3}, + j4={"d": 4}, + ) + lf = Foo2(**data) + sess.add(lf) + sess.commit() + + all_attrs = {"j1", "j2", "j3", "j4"} + for attr in all_attrs: + for loads, dumps in picklers(): + with Session(connection) as sess: + f1 = sess.scalars(select(Foo2)).first() + if pickleit: + f2 = loads(dumps(f1)) + else: + f2 = f1 + + existing_dict = getattr(f2, attr) + existing_dict["q"] = "c" + eq_( + inspect(f2).attrs[attr].history, + ([existing_dict], (), ()), + ) + for other_attr in all_attrs.difference([attr]): + a = inspect(f2).attrs[other_attr].history + b = ((), [data[other_attr]], ()) + eq_(a, b) + + class _MutableDictTestBase(_MutableDictTestFixture): run_define_tables = "each" From c0de2ec4af9c03ca9f6c82921e94af66b9c753e2 Mon Sep 17 00:00:00 2001 From: Gord Thompson Date: Tue, 14 Jun 2022 10:09:04 -0600 Subject: [PATCH 277/632] Allow NUMERIC()/DECIMAL() IDENTITY columns Fixed issue where :class:`.Table` objects that made use of IDENTITY columns with a :class:`.Numeric` datatype would produce errors when attempting to reconcile the "autoincrement" column, 
preventing construction of the :class:`.Column` from using the :paramref:`.Column.autoincrement` parameter as well as emitting errors when attempting to invoke an :class:`.Insert` construct. Fixes: #8111 Change-Id: Iaacc4eebfbafb42fa18f9a1a4f43cb2b6b91d28a (cherry picked from commit a134956c4e4564844c33302ddf27a70102fe00a8) --- doc/build/changelog/unreleased_14/8111.rst | 11 +++++++++++ lib/sqlalchemy/sql/schema.py | 6 +++++- lib/sqlalchemy/sql/sqltypes.py | 2 ++ lib/sqlalchemy/sql/type_api.py | 1 + test/dialect/mssql/test_query.py | 17 +++++++++++++++++ 5 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/8111.rst diff --git a/doc/build/changelog/unreleased_14/8111.rst b/doc/build/changelog/unreleased_14/8111.rst new file mode 100644 index 00000000000..ac43297027d --- /dev/null +++ b/doc/build/changelog/unreleased_14/8111.rst @@ -0,0 +1,11 @@ +.. change:: + :tags: bug, schema, mssql + :tickets: 8111 + + Fixed issue where :class:`.Table` objects that made use of IDENTITY columns + with a :class:`.Numeric` datatype would produce errors when attempting to + reconcile the "autoincrement" column, preventing construction of the + :class:`.Column` from using the :paramref:`.Column.autoincrement` parameter + as well as emitting errors when attempting to invoke an :class:`.Insert` + construct. 
+ diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py index bc7e65d90c8..e89ac9ef561 100644 --- a/lib/sqlalchemy/sql/schema.py +++ b/lib/sqlalchemy/sql/schema.py @@ -4081,7 +4081,11 @@ def columns_autoinc_first(self): def _autoincrement_column(self): def _validate_autoinc(col, autoinc_true): if col.type._type_affinity is None or not issubclass( - col.type._type_affinity, type_api.INTEGERTYPE._type_affinity + col.type._type_affinity, + ( + type_api.INTEGERTYPE._type_affinity, + type_api.NUMERICTYPE._type_affinity, + ), ): if autoinc_true: raise exc.ArgumentError( diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index 306ac397df3..fa50da65022 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ -3288,6 +3288,7 @@ class MatchType(Boolean): BOOLEANTYPE = Boolean() STRINGTYPE = String() INTEGERTYPE = Integer() +NUMERICTYPE = Numeric() MATCHTYPE = MatchType() TABLEVALUE = TableValueType() DATETIME_TIMEZONE = DateTime(timezone=True) @@ -3342,6 +3343,7 @@ def _resolve_value_to_type(value): type_api.STRINGTYPE = STRINGTYPE type_api.INTEGERTYPE = INTEGERTYPE type_api.NULLTYPE = NULLTYPE +type_api.NUMERICTYPE = NUMERICTYPE type_api.MATCHTYPE = MATCHTYPE type_api.INDEXABLE = Indexable type_api.TABLEVALUE = TABLEVALUE diff --git a/lib/sqlalchemy/sql/type_api.py b/lib/sqlalchemy/sql/type_api.py index 840668c378e..7431c08a41d 100644 --- a/lib/sqlalchemy/sql/type_api.py +++ b/lib/sqlalchemy/sql/type_api.py @@ -22,6 +22,7 @@ BOOLEANTYPE = None INTEGERTYPE = None NULLTYPE = None +NUMERICTYPE = None STRINGTYPE = None MATCHTYPE = None INDEXABLE = None diff --git a/test/dialect/mssql/test_query.py b/test/dialect/mssql/test_query.py index 4c02fc171c8..3a34bf04cef 100644 --- a/test/dialect/mssql/test_query.py +++ b/test/dialect/mssql/test_query.py @@ -1,4 +1,6 @@ # -*- encoding: utf-8 +import decimal + from sqlalchemy import and_ from sqlalchemy import Column from sqlalchemy import DDL @@ -9,6 +11,7 @@ from 
sqlalchemy import Identity from sqlalchemy import Integer from sqlalchemy import literal +from sqlalchemy import Numeric from sqlalchemy import or_ from sqlalchemy import PrimaryKeyConstraint from sqlalchemy import select @@ -41,6 +44,13 @@ def define_tables(cls, metadata): Column("description", String(50)), PrimaryKeyConstraint("id", name="PK_cattable"), ) + Table( + "numeric_identity", + metadata, + Column("id", Numeric(18, 0), autoincrement=True), + Column("description", String(50)), + PrimaryKeyConstraint("id", name="PK_numeric_identity"), + ) def test_compiled(self): cattable = self.tables.cattable @@ -63,6 +73,13 @@ def test_execute(self, connection): lastcat = conn.execute(cattable.select().order_by(desc(cattable.c.id))) eq_((10, "PHP"), lastcat.first()) + numeric_identity = self.tables.numeric_identity + # for some reason, T-SQL does not like .values(), but this works + result = conn.execute( + numeric_identity.insert(), dict(description="T-SQL") + ) + eq_(result.inserted_primary_key, (decimal.Decimal("1"),)) + def test_executemany(self, connection): conn = connection cattable = self.tables.cattable From 61662b25b87d3973eb5bcc1b5c0d2f5908f24405 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Fri, 17 Jun 2022 21:53:32 +0200 Subject: [PATCH 278/632] Normalize postgresql docs links to point to current Change-Id: Ib7d3ea7ff3356ff8a2f935892d904a69dbc25c3e (cherry picked from commit 3bde1eae31212b5bc2c6652dca52f70fb79d5950) --- doc/build/changelog/migration_07.rst | 3 +-- doc/build/orm/versioning.rst | 4 ++-- lib/sqlalchemy/dialects/postgresql/base.py | 13 +++++++++---- lib/sqlalchemy/dialects/postgresql/ext.py | 2 +- lib/sqlalchemy/dialects/postgresql/psycopg2.py | 4 ++-- lib/sqlalchemy/dialects/postgresql/ranges.py | 2 +- lib/sqlalchemy/orm/query.py | 2 +- lib/sqlalchemy/sql/schema.py | 4 ++-- lib/sqlalchemy/sql/sqltypes.py | 16 ++++++++-------- test/dialect/postgresql/test_reflection.py | 2 +- 10 files changed, 28 insertions(+), 24 deletions(-) diff 
--git a/doc/build/changelog/migration_07.rst b/doc/build/changelog/migration_07.rst index 12a3c23e6d0..a222f5380bd 100644 --- a/doc/build/changelog/migration_07.rst +++ b/doc/build/changelog/migration_07.rst @@ -373,8 +373,7 @@ The best introduction to window functions is on PostgreSQL's site, where window functions have been supported since version 8.4: -https://www.postgresql.org/docs/9.0/static/tutorial- -window.html +https://www.postgresql.org/docs/current/static/tutorial-window.html SQLAlchemy provides a simple construct typically invoked via an existing function clause, using the ``over()`` method, diff --git a/doc/build/orm/versioning.rst b/doc/build/orm/versioning.rst index a141df6a0cd..30388eb8d24 100644 --- a/doc/build/orm/versioning.rst +++ b/doc/build/orm/versioning.rst @@ -45,7 +45,7 @@ transaction). .. seealso:: - `Repeatable Read Isolation Level `_ - PostgreSQL's implementation of repeatable read, including a description of the error condition. + `Repeatable Read Isolation Level `_ - PostgreSQL's implementation of repeatable read, including a description of the error condition. Simple Version Counting ----------------------- @@ -141,7 +141,7 @@ some means of generating new identifiers when a row is subject to an INSERT as well as with an UPDATE. For the UPDATE case, typically an update trigger is needed, unless the database in question supports some other native version identifier. The PostgreSQL database in particular supports a system -column called `xmin `_ +column called `xmin `_ which provides UPDATE versioning. 
We can make use of the PostgreSQL ``xmin`` column to version our ``User`` class as follows:: diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index 92d9e263e87..db88d9e6a89 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -409,7 +409,7 @@ def set_search_path(dbapi_connection, connection_record): from a backend-agnostic perspective `The Schema Search Path - `_ + `_ - on the PostgreSQL website. INSERT/UPDATE...RETURNING @@ -735,7 +735,7 @@ def set_search_path(dbapi_connection, connection_record): It's important to remember that text searching in PostgreSQL is powerful but complicated, and SQLAlchemy users are advised to reference the PostgreSQL documentation regarding - `Full Text Search `_. + `Full Text Search `_. There are important differences between ``to_tsquery`` and ``plainto_tsquery``, the most significant of which is that ``to_tsquery`` @@ -789,6 +789,11 @@ def set_search_path(dbapi_connection, connection_record): PostgreSQL to ensure that you are generating queries with SQLAlchemy that take full advantage of any indexes you may have created for full text search. +.. seealso:: + + `Full Text Search `_ - in the PostgreSQL documentation + + FROM ONLY ... ------------- @@ -849,7 +854,7 @@ def set_search_path(dbapi_connection, connection_record): PostgreSQL allows the specification of an *operator class* for each column of an index (see -https://www.postgresql.org/docs/8.3/interactive/indexes-opclass.html). +https://www.postgresql.org/docs/current/interactive/indexes-opclass.html). The :class:`.Index` construct allows these to be specified via the ``postgresql_ops`` keyword argument:: @@ -891,7 +896,7 @@ def set_search_path(dbapi_connection, connection_record): PostgreSQL provides several index types: B-Tree, Hash, GiST, and GIN, as well as the ability for users to create their own (see -https://www.postgresql.org/docs/8.3/static/indexes-types.html). 
These can be +https://www.postgresql.org/docs/current/static/indexes-types.html). These can be specified on :class:`.Index` using the ``postgresql_using`` keyword argument:: Index('my_index', my_table.c.data, postgresql_using='gin') diff --git a/lib/sqlalchemy/dialects/postgresql/ext.py b/lib/sqlalchemy/dialects/postgresql/ext.py index c3bda33b92a..9e52ee1ee9f 100644 --- a/lib/sqlalchemy/dialects/postgresql/ext.py +++ b/lib/sqlalchemy/dialects/postgresql/ext.py @@ -93,7 +93,7 @@ class ExcludeConstraint(ColumnCollectionConstraint): Defines an EXCLUDE constraint as described in the `PostgreSQL documentation`__. - __ https://www.postgresql.org/docs/9.0/static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE + __ https://www.postgresql.org/docs/current/static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE """ # noqa diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py index 663a17b2aae..bacd60bbeff 100644 --- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py +++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py @@ -110,7 +110,7 @@ .. seealso:: `PQconnectdbParams \ - `_ + `_ .. _psycopg2_multi_host: @@ -135,7 +135,7 @@ .. seealso:: `PQConnString \ - `_ + `_ Empty DSN Connections / Environment Variable Connections --------------------------------------------------------- diff --git a/lib/sqlalchemy/dialects/postgresql/ranges.py b/lib/sqlalchemy/dialects/postgresql/ranges.py index 35cf360cff0..51f3b0489f8 100644 --- a/lib/sqlalchemy/dialects/postgresql/ranges.py +++ b/lib/sqlalchemy/dialects/postgresql/ranges.py @@ -18,7 +18,7 @@ class RangeOperators(object): provided in the ``postgres`` dialect and can likely be used for any range types you create yourself. - __ https://www.postgresql.org/docs/devel/static/functions-range.html + __ https://www.postgresql.org/docs/current/static/functions-range.html No extra support is provided for the Range Functions listed in the Range Functions table of the PostgreSQL documentation. 
For these, the normal diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index ba5e448acd3..0ab39120658 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -511,7 +511,7 @@ def cte(self, name=None, recursive=False, nesting=False): Here is the `PostgreSQL WITH RECURSIVE example - `_. + `_. Note that, in this example, the ``included_parts`` cte and the ``incl_alias`` alias of it are Core selectables, which means the columns are accessed via the ``.c.`` attribute. The diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py index bc7e65d90c8..b36ae6e8a5d 100644 --- a/lib/sqlalchemy/sql/schema.py +++ b/lib/sqlalchemy/sql/schema.py @@ -3749,8 +3749,8 @@ def __init__( ) else: # e.g. FOREIGN KEY (a) REFERENCES r (b, c) - # paraphrasing https://www.postgresql.org/docs/9.2/static/\ - # ddl-constraints.html + # paraphrasing + # https://www.postgresql.org/docs/current/static/ddl-constraints.html raise exc.ArgumentError( "ForeignKeyConstraint number " "of constrained columns must match the number of " diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index 306ac397df3..edb9c676d29 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ -862,8 +862,8 @@ def python_type(self): @util.memoized_property def _expression_adaptations(self): - # Based on https://www.postgresql.org/docs/current/\ - # static/functions-datetime.html. + # Based on + # https://www.postgresql.org/docs/current/static/functions-datetime.html. return { operators.add: {Interval: self.__class__}, @@ -886,8 +886,8 @@ def python_type(self): @util.memoized_property def _expression_adaptations(self): - # Based on https://www.postgresql.org/docs/current/\ - # static/functions-datetime.html. + # Based on + # https://www.postgresql.org/docs/current/static/functions-datetime.html. 
return { operators.add: { @@ -934,8 +934,8 @@ def _resolve_for_literal(self, value): @util.memoized_property def _expression_adaptations(self): - # Based on https://www.postgresql.org/docs/current/\ - # static/functions-datetime.html. + # Based on + # https://www.postgresql.org/docs/current/static/functions-datetime.html. return { operators.add: {Date: DateTime, Interval: self.__class__}, @@ -2014,8 +2014,8 @@ def result_processor(self, dialect, coltype): class _AbstractInterval(_LookupExpressionAdapter, TypeEngine): @util.memoized_property def _expression_adaptations(self): - # Based on https://www.postgresql.org/docs/current/\ - # static/functions-datetime.html. + # Based on + # https://www.postgresql.org/docs/current/static/functions-datetime.html. return { operators.add: { diff --git a/test/dialect/postgresql/test_reflection.py b/test/dialect/postgresql/test_reflection.py index 3502c745b26..bf8cd511116 100644 --- a/test/dialect/postgresql/test_reflection.py +++ b/test/dialect/postgresql/test_reflection.py @@ -1091,7 +1091,7 @@ def test_index_reflection_with_sorting(self, metadata, connection): # "ASC NULLS LAST" is implicit default for indexes, # and "NULLS FIRST" is implicit default for "DESC". - # (https://www.postgresql.org/docs/11/indexes-ordering.html) + # (https://www.postgresql.org/docs/current/indexes-ordering.html) def compile_exprs(exprs): return list(map(str, exprs)) From 283a0f9d3004e93111a7f13031ecb681b92aa55d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 13 Jun 2022 11:46:28 -0400 Subject: [PATCH 279/632] rework ORM mapping docs prepare docs for newly incoming mapper styles, including new dataclass mapping. move the existing dataclass/attrs docs all into their own section and try to improve organization and wording into the relatively recent "mapping styles" document. 
Change-Id: I0b5e2a5b6a70db65ab19b5bb0a2bb7df20e0b498 (cherry picked from commit 8820e02ca9dd8acb57d757d809fff7dc03c44ee6) --- doc/build/changelog/migration_13.rst | 2 +- doc/build/changelog/migration_14.rst | 2 +- doc/build/core/inspection.rst | 11 + doc/build/dialects/oracle.rst | 1 + doc/build/glossary.rst | 16 + doc/build/index.rst | 2 +- doc/build/orm/classical.rst | 2 +- doc/build/orm/dataclasses.rst | 517 +++++++++++++++++++ doc/build/orm/declarative_config.rst | 97 +++- doc/build/orm/declarative_mixins.rst | 14 +- doc/build/orm/declarative_styles.rst | 301 ----------- doc/build/orm/declarative_tables.rst | 107 ++-- doc/build/orm/inheritance.rst | 2 +- doc/build/orm/mapper_config.rst | 20 +- doc/build/orm/mapping_columns.rst | 127 ++++- doc/build/orm/mapping_styles.rst | 308 +++++------ doc/build/orm/session_state_management.rst | 25 +- lib/sqlalchemy/ext/automap.py | 14 +- lib/sqlalchemy/ext/declarative/extensions.py | 5 + lib/sqlalchemy/orm/mapper.py | 36 +- lib/sqlalchemy/orm/state.py | 4 +- 21 files changed, 1065 insertions(+), 548 deletions(-) create mode 100644 doc/build/orm/dataclasses.rst diff --git a/doc/build/changelog/migration_13.rst b/doc/build/changelog/migration_13.rst index d7a26084e36..f54bae329d0 100644 --- a/doc/build/changelog/migration_13.rst +++ b/doc/build/changelog/migration_13.rst @@ -85,7 +85,7 @@ Relationship to AliasedClass replaces the need for non primary mappers ----------------------------------------------------------------------- The "non primary mapper" is a :func:`.mapper` created in the -:ref:`classical_mapping` style, which acts as an additional mapper against an +:ref:`orm_imperative_mapping` style, which acts as an additional mapper against an already mapped class against a different kind of selectable. 
The non primary mapper has its roots in the 0.1, 0.2 series of SQLAlchemy where it was anticipated that the :func:`.mapper` object was to be the primary query diff --git a/doc/build/changelog/migration_14.rst b/doc/build/changelog/migration_14.rst index cca8c7e0e76..b6cce48849d 100644 --- a/doc/build/changelog/migration_14.rst +++ b/doc/build/changelog/migration_14.rst @@ -288,7 +288,7 @@ the :class:`_orm.registry` object, and fall into these categories: * Declarative Table * Imperative Table (Hybrid) * :ref:`orm_declarative_dataclasses` -* :ref:`Imperative (a.k.a. "classical" mapping) ` +* :ref:`Imperative (a.k.a. "classical" mapping) ` * Using :meth:`_orm.registry.map_imperatively` * :ref:`orm_imperative_dataclasses` diff --git a/doc/build/core/inspection.rst b/doc/build/core/inspection.rst index eab1288422c..7816cd3fd8c 100644 --- a/doc/build/core/inspection.rst +++ b/doc/build/core/inspection.rst @@ -25,8 +25,18 @@ Below is a listing of many of the most common inspection targets. to per attribute state via the :class:`.AttributeState` interface as well as the per-flush "history" of any attribute via the :class:`.History` object. + + .. seealso:: + + :ref:`orm_mapper_inspection_instancestate` + * ``type`` (i.e. a class) - a class given will be checked by the ORM for a mapping - if so, a :class:`_orm.Mapper` for that class is returned. + + .. seealso:: + + :ref:`orm_mapper_inspection_mapper` + * mapped attribute - passing a mapped attribute to :func:`_sa.inspect`, such as ``inspect(MyClass.some_attribute)``, returns a :class:`.QueryableAttribute` object, which is the :term:`descriptor` associated with a mapped class. @@ -36,3 +46,4 @@ Below is a listing of many of the most common inspection targets. attribute. * :class:`.AliasedClass` - returns an :class:`.AliasedInsp` object. 
+ diff --git a/doc/build/dialects/oracle.rst b/doc/build/dialects/oracle.rst index 988a698e827..81cef78d272 100644 --- a/doc/build/dialects/oracle.rst +++ b/doc/build/dialects/oracle.rst @@ -55,6 +55,7 @@ construction arguments, are as follows: .. autoclass:: RAW :members: __init__ +.. _cx_oracle: cx_Oracle --------- diff --git a/doc/build/glossary.rst b/doc/build/glossary.rst index b9b0002e844..c05cedc3299 100644 --- a/doc/build/glossary.rst +++ b/doc/build/glossary.rst @@ -74,6 +74,22 @@ Glossary # Session returns a Result that has ORM entities list_of_users = result.scalars().all() + + reflection + reflected + In SQLAlchemy, this term refers to the feature of querying a database's + schema catalogs in order to load information about existing tables, + columns, constraints, and other constructs. SQLAlchemy includes + features that can both provide raw data for this information, as well + as that it can construct Core/ORM usable :class:`.Table` objects + from database schema catalogs automatically. + + .. seealso:: + + :ref:`metadata_reflection_toplevel` - complete background on + database reflection. + + imperative declarative diff --git a/doc/build/index.rst b/doc/build/index.rst index 555ffba8a52..4a7059029ba 100644 --- a/doc/build/index.rst +++ b/doc/build/index.rst @@ -83,7 +83,7 @@ SQLAlchemy Documentation **SQLAlchemy ORM** * **ORM Configuration:** - :doc:`Mapper Configuration ` | + :doc:`Mapped Class Configuration ` | :doc:`Relationship Configuration ` * **ORM Usage:** diff --git a/doc/build/orm/classical.rst b/doc/build/orm/classical.rst index 3fd149f9285..a0bc70d890a 100644 --- a/doc/build/orm/classical.rst +++ b/doc/build/orm/classical.rst @@ -1,5 +1,5 @@ :orphan: -Moved! :ref:`classical_mapping` +Moved! :ref:`orm_imperative_mapping` diff --git a/doc/build/orm/dataclasses.rst b/doc/build/orm/dataclasses.rst new file mode 100644 index 00000000000..4c1c91285e1 --- /dev/null +++ b/doc/build/orm/dataclasses.rst @@ -0,0 +1,517 @@ +.. 
_orm_dataclasses_toplevel: + +====================================== +Integration with dataclasses and attrs +====================================== + +SQLAlchemy 1.4 has limited support for ORM mappings that are established +against classes that have already been pre-instrumented using either Python's +built-in dataclasses_ library or the attrs_ third party integration library. + +.. tip:: SQLAlchemy 2.0 will include a new dataclass integration feature which + allows for a particular class to be mapped and converted into a Python + dataclass simultaneously, with full support for SQLAlchemy's declarative + syntax. Within the scope of the 1.4 release, the ``@dataclass`` decorator + is used separately as documented in this section. + +.. _orm_declarative_dataclasses: + +Applying ORM Mappings to an existing dataclass +---------------------------------------------- + +The dataclasses_ module, added in Python 3.7, provides a ``@dataclass`` class +decorator to automatically generate boilerplate definitions of common object +methods including ``__init__()``, ``__repr()__``, and other methods. SQLAlchemy +supports the application of ORM mappings to a class after it has been processed +with the ``@dataclass`` decorator, by using either the +:meth:`_orm.registry.mapped` class decorator, or the +:meth:`_orm.registry.map_imperatively` method to apply ORM mappings to the +class using Imperative. + +.. versionadded:: 1.4 Added support for direct mapping of Python dataclasses + +To map an existing dataclass, SQLAlchemy's "inline" declarative directives +cannot be used directly; ORM directives are assigned using one of three +techniques: + +* Using "Declarative with Imperative Table", the table / column to be mapped + is defined using a :class:`_schema.Table` object assigned to the + ``__table__`` attribute of the class; relationships are defined within + ``__mapper_args__`` dictionary. The class is mapped using the + :meth:`_orm.registry.mapped` decorator. 
An example is below at + :ref:`orm_declarative_dataclasses_imperative_table`. + +* Using full "Declarative", the Declarative-interpreted directives such as + :class:`_schema.Column`, :func:`_orm.relationship` are added to the + ``.metadata`` dictionary of the ``dataclasses.field()`` construct, where + they are consumed by the declarative process. The class is again + mapped using the :meth:`_orm.registry.mapped` decorator. See the example + below at :ref:`orm_declarative_dataclasses_declarative_table`. + +* An "Imperative" mapping can be applied to an existing dataclass using + the :meth:`_orm.registry.map_imperatively` method to produce the mapping + in exactly the same way as described at :ref:`orm_imperative_mapping`. + This is illustrated below at :ref:`orm_imperative_dataclasses`. + +The general process by which SQLAlchemy applies mappings to a dataclass +is the same as that of an ordinary class, but also includes that +SQLAlchemy will detect class-level attributes that were part of the +dataclasses declaration process and replace them at runtime with +the usual SQLAlchemy ORM mapped attributes. The ``__init__`` method that +would have been generated by dataclasses is left intact, as is the same +for all the other methods that dataclasses generates such as +``__eq__()``, ``__repr__()``, etc. + +.. _orm_declarative_dataclasses_imperative_table: + +Mapping dataclasses using Declarative With Imperative Table +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +An example of a mapping using ``@dataclass`` using +:ref:`orm_imperative_table_configuration` is below. A complete +:class:`_schema.Table` object is constructed explicitly and assigned to the +``__table__`` attribute. Instance fields are defined using normal dataclass +syntaxes. 
Additional :class:`.MapperProperty` +definitions such as :func:`.relationship`, are placed in the +:ref:`__mapper_args__ ` class-level +dictionary underneath the ``properties`` key, corresponding to the +:paramref:`_orm.mapper.properties` parameter:: + + from __future__ import annotations + + from dataclasses import dataclass, field + from typing import List, Optional + + from sqlalchemy import Column, ForeignKey, Integer, String, Table + from sqlalchemy.orm import registry, relationship + + mapper_registry = registry() + + + @mapper_registry.mapped + @dataclass + class User: + __table__ = Table( + "user", + mapper_registry.metadata, + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("fullname", String(50)), + Column("nickname", String(12)), + ) + id: int = field(init=False) + name: Optional[str] = None + fullname: Optional[str] = None + nickname: Optional[str] = None + addresses: List[Address] = field(default_factory=list) + + __mapper_args__ = { # type: ignore + "properties": { + "addresses": relationship("Address"), + } + } + + + @mapper_registry.mapped + @dataclass + class Address: + __table__ = Table( + "address", + mapper_registry.metadata, + Column("id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.id")), + Column("email_address", String(50)), + ) + id: int = field(init=False) + user_id: int = field(init=False) + email_address: Optional[str] = None + +In the above example, the ``User.id``, ``Address.id``, and ``Address.user_id`` +attributes are defined as ``field(init=False)``. This means that parameters for +these won't be added to ``__init__()`` methods, but +:class:`.Session` will still be able to set them after getting their values +during flush from autoincrement or other default value generator. To +allow them to be specified in the constructor explicitly, they would instead +be given a default value of ``None``. 
+ +For a :func:`_orm.relationship` to be declared separately, it needs to be +specified directly within the :paramref:`_orm.mapper.properties` dictionary +which itself is specified within the ``__mapper_args__`` dictionary, so that it +is passed to the :func:`_orm.mapper` construction function. An alternative to this +approach is in the next example. + +.. _orm_declarative_dataclasses_declarative_table: + +Mapping dataclasses using Declarative Mapping +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The fully declarative approach requires that :class:`_schema.Column` objects +are declared as class attributes, which when using dataclasses would conflict +with the dataclass-level attributes. An approach to combine these together +is to make use of the ``metadata`` attribute on the ``dataclass.field`` +object, where SQLAlchemy-specific mapping information may be supplied. +Declarative supports extraction of these parameters when the class +specifies the attribute ``__sa_dataclass_metadata_key__``. 
This also +provides a more succinct method of indicating the :func:`_orm.relationship` +association:: + + + from __future__ import annotations + + from dataclasses import dataclass, field + from typing import List + + from sqlalchemy import Column, ForeignKey, Integer, String + from sqlalchemy.orm import registry, relationship + + mapper_registry = registry() + + + @mapper_registry.mapped + @dataclass + class User: + __tablename__ = "user" + + __sa_dataclass_metadata_key__ = "sa" + id: int = field( + init=False, metadata={"sa": Column(Integer, primary_key=True)} + ) + name: str = field(default=None, metadata={"sa": Column(String(50))}) + fullname: str = field(default=None, metadata={"sa": Column(String(50))}) + nickname: str = field(default=None, metadata={"sa": Column(String(12))}) + addresses: List[Address] = field( + default_factory=list, metadata={"sa": relationship("Address")} + ) + + + @mapper_registry.mapped + @dataclass + class Address: + __tablename__ = "address" + __sa_dataclass_metadata_key__ = "sa" + id: int = field( + init=False, metadata={"sa": Column(Integer, primary_key=True)} + ) + user_id: int = field( + init=False, metadata={"sa": Column(ForeignKey("user.id"))} + ) + email_address: str = field( + default=None, metadata={"sa": Column(String(50))} + ) + +.. _orm_imperative_dataclasses: + +Mapping dataclasses using Imperative Mapping +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +As described previously, a class which is set up as a dataclass using the +``@dataclass`` decorator can then be further decorated using the +:meth:`_orm.registry.mapped` decorator in order to apply declarative-style +mapping to the class. 
As an alternative to using the +:meth:`_orm.registry.mapped` decorator, we may also pass the class through the +:meth:`_orm.registry.map_imperatively` method instead, so that we may pass all +:class:`_schema.Table` and :func:`_orm.mapper` configuration imperatively to +the function rather than having them defined on the class itself as class +variables:: + + from __future__ import annotations + + from dataclasses import dataclass + from dataclasses import field + from typing import List + + from sqlalchemy import Column + from sqlalchemy import ForeignKey + from sqlalchemy import Integer + from sqlalchemy import MetaData + from sqlalchemy import String + from sqlalchemy import Table + from sqlalchemy.orm import registry + from sqlalchemy.orm import relationship + + mapper_registry = registry() + + @dataclass + class User: + id: int = field(init=False) + name: str = None + fullname: str = None + nickname: str = None + addresses: List[Address] = field(default_factory=list) + + @dataclass + class Address: + id: int = field(init=False) + user_id: int = field(init=False) + email_address: str = None + + metadata_obj = MetaData() + + user = Table( + 'user', + metadata_obj, + Column('id', Integer, primary_key=True), + Column('name', String(50)), + Column('fullname', String(50)), + Column('nickname', String(12)), + ) + + address = Table( + 'address', + metadata_obj, + Column('id', Integer, primary_key=True), + Column('user_id', Integer, ForeignKey('user.id')), + Column('email_address', String(50)), + ) + + mapper_registry.map_imperatively(User, user, properties={ + 'addresses': relationship(Address, backref='user', order_by=address.c.id), + }) + + mapper_registry.map_imperatively(Address, address) + +.. _orm_declarative_dataclasses_mixin: + +Using Declarative Mixins with Dataclasses +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In the section :ref:`orm_mixins_toplevel`, Declarative Mixin classes +are introduced. 
One requirement of declarative mixins is that certain +constructs that can't be easily duplicated must be given as callables, +using the :class:`_orm.declared_attr` decorator, such as in the +example at :ref:`orm_declarative_mixins_relationships`:: + + class RefTargetMixin: + @declared_attr + def target_id(cls): + return Column("target_id", ForeignKey("target.id")) + + @declared_attr + def target(cls): + return relationship("Target") + +This form is supported within the Dataclasses ``field()`` object by using +a lambda to indicate the SQLAlchemy construct inside the ``field()``. +Using :func:`_orm.declared_attr` to surround the lambda is optional. +If we wanted to produce our ``User`` class above where the ORM fields +came from a mixin that is itself a dataclass, the form would be:: + + @dataclass + class UserMixin: + __tablename__ = "user" + + __sa_dataclass_metadata_key__ = "sa" + + id: int = field( + init=False, metadata={"sa": Column(Integer, primary_key=True)} + ) + + addresses: List[Address] = field( + default_factory=list, metadata={"sa": lambda: relationship("Address")} + ) + + + @dataclass + class AddressMixin: + __tablename__ = "address" + __sa_dataclass_metadata_key__ = "sa" + id: int = field( + init=False, metadata={"sa": Column(Integer, primary_key=True)} + ) + user_id: int = field( + init=False, metadata={"sa": lambda: Column(ForeignKey("user.id"))} + ) + email_address: str = field( + default=None, metadata={"sa": Column(String(50))} + ) + + + @mapper_registry.mapped + class User(UserMixin): + pass + + + @mapper_registry.mapped + class Address(AddressMixin): + pass + +.. versionadded:: 1.4.2 Added support for "declared attr" style mixin attributes, + namely :func:`_orm.relationship` constructs as well as :class:`_schema.Column` + objects with foreign key declarations, to be used within "Dataclasses + with Declarative Table" style mappings. + + + +.. 
_orm_declarative_attrs_imperative_table: + +Applying ORM mappings to an existing attrs class +------------------------------------------------- + +The attrs_ library is a popular third party library that provides similar +features as dataclasses, with many additional features provided not +found in ordinary dataclasses. + +A class augmented with attrs_ uses the ``@define`` decorator. This decorator +initiates a process to scan the class for attributes that define the class' +behavior, which are then used to generate methods, documentation, and +annotations. + +The SQLAlchemy ORM supports mapping an attrs_ class using **Declarative with +Imperative Table** or **Imperative** mapping. The general form of these two +styles is fully equivalent to the +:ref:`orm_declarative_dataclasses_declarative_table` and +:ref:`orm_declarative_attrs_imperative_table` mapping forms used with +dataclasses, where the inline attribute directives used by dataclasses or attrs +are unchanged, and SQLAlchemy's table-oriented instrumentation is applied at +runtime. + +The ``@define`` decorator of attrs_ by default replaces the annotated class +with a new __slots__ based class, which is not supported. When using the old +style annotation ``@attr.s`` or using ``define(slots=False)``, the class +does not get replaced. Furthermore attrs removes its own class-bound attributes +after the decorator runs, so that SQLAlchemy's mapping process takes over these +attributes without any issue. Both decorators, ``@attr.s`` and ``@define(slots=False)`` +work with SQLAlchemy. + +Mapping attrs with Declarative "Imperative Table" +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +In the "Declarative with Imperative Table" style, a :class:`_schema.Table` +object is declared inline with the declarative class. 
The +``@define`` decorator is applied to the class first, then the +:meth:`_orm.registry.mapped` decorator second:: + + + from __future__ import annotations + + from typing import List + + from attrs import define + from sqlalchemy import Column + from sqlalchemy import ForeignKey + from sqlalchemy import Integer + from sqlalchemy import MetaData + from sqlalchemy import String + from sqlalchemy import Table + from sqlalchemy.orm import registry + from sqlalchemy.orm import relationship + + mapper_registry = registry() + + + @mapper_registry.mapped + @define(slots=False) + class User: + __table__ = Table( + "user", + mapper_registry.metadata, + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("fullname", String(50)), + Column("nickname", String(12)), + ) + id: int + name: str + fullname: str + nickname: str + addresses: List[Address] + + __mapper_args__ = { # type: ignore + "properties": { + "addresses": relationship("Address"), + } + } + + @mapper_registry.mapped + @define(slots=False) + class Address: + __table__ = Table( + "address", + mapper_registry.metadata, + Column("id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.id")), + Column("email_address", String(50)), + ) + id: int + user_id: int + email_address: Optional[str] + + +.. note:: The ``attrs`` ``slots=True`` option, which enables ``__slots__`` on + a mapped class, cannot be used with SQLAlchemy mappings without fully + implementing alternative + :ref:`attribute instrumentation `, as mapped + classes normally rely upon direct access to ``__dict__`` for state storage. + Behavior is undefined when this option is present. 
+ + + +Mapping attrs with Imperative Mapping +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Just as is the case with dataclasses, we can make use of +:meth:`_orm.registry.map_imperatively` to map an existing ``attrs`` class +as well:: + + from __future__ import annotations + + from typing import List + + from attrs import define + from sqlalchemy import Column + from sqlalchemy import ForeignKey + from sqlalchemy import Integer + from sqlalchemy import MetaData + from sqlalchemy import String + from sqlalchemy import Table + from sqlalchemy.orm import registry + from sqlalchemy.orm import relationship + + mapper_registry = registry() + + @define(slots=False) + class User: + id: int + name: str + fullname: str + nickname: str + addresses: List[Address] + + @define(slots=False) + class Address: + id: int + user_id: int + email_address: Optional[str] + + metadata_obj = MetaData() + + user = Table( + 'user', + metadata_obj, + Column('id', Integer, primary_key=True), + Column('name', String(50)), + Column('fullname', String(50)), + Column('nickname', String(12)), + ) + + address = Table( + 'address', + metadata_obj, + Column('id', Integer, primary_key=True), + Column('user_id', Integer, ForeignKey('user.id')), + Column('email_address', String(50)), + ) + + mapper_registry.map_imperatively(User, user, properties={ + 'addresses': relationship(Address, backref='user', order_by=address.c.id), + }) + + mapper_registry.map_imperatively(Address, address) + +The above form is equivalent to the previous example using +Declarative with Imperative Table. + + + +.. _dataclasses: https://docs.python.org/3/library/dataclasses.html +.. 
_attrs: https://pypi.org/project/attrs/ diff --git a/doc/build/orm/declarative_config.rst b/doc/build/orm/declarative_config.rst index 2386b6bcd1a..55bf0f74c79 100644 --- a/doc/build/orm/declarative_config.rst +++ b/doc/build/orm/declarative_config.rst @@ -174,10 +174,35 @@ using the ``__mapper_args__`` declarative class variable, which is a dictionary that is passed as keyword arguments to the :func:`_orm.mapper` function. Some examples: +**Map Specific Primary Key Columns** + +The example below illustrates Declarative-level settings for the +:paramref:`_orm.mapper.primary_key` parameter, which establishes +particular columns as part of what the ORM should consider to be a primary +key for the class, independently of schema-level primary key constraints:: + + class GroupUsers(Base): + __tablename__ = 'group_users' + + user_id = Column(String(40)) + group_id = Column(String(40)) + + __mapper_args__ = { + "primary_key": [user_id, group_id] + } + +.. seealso:: + + :ref:`mapper_primary_key` - further background on ORM mapping of explicit + columns as primary key columns + **Version ID Column** -The :paramref:`_orm.mapper.version_id_col` and -:paramref:`_orm.mapper.version_id_generator` parameters:: +The example below illustrates Declarative-level settings for the +:paramref:`_orm.mapper.version_id_col` and +:paramref:`_orm.mapper.version_id_generator` parameters, which configure +an ORM-maintained version counter that is updated and checked within the +:term:`unit of work` flush process:: from datetime import datetime @@ -193,10 +218,16 @@ The :paramref:`_orm.mapper.version_id_col` and "version_id_generator": lambda v: datetime.now(), } +.. 
seealso:: + + :ref:`mapper_version_counter` - background on the ORM version counter feature + **Single Table Inheritance** -The :paramref:`_orm.mapper.polymorphic_on` and -:paramref:`_orm.mapper.polymorphic_identity` parameters:: +The example below illustrates Declarative-level settings for the +:paramref:`_orm.mapper.polymorphic_on` and +:paramref:`_orm.mapper.polymorphic_identity` parameters, which are used when +configuring a single-table inheritance mapping:: class Person(Base): __tablename__ = "person" @@ -215,15 +246,69 @@ The :paramref:`_orm.mapper.polymorphic_on` and polymorphic_identity="employee", ) + +.. seealso:: + + :ref:`single_inheritance` - background on the ORM single table inheritance + mapping feature. + +Constructing mapper arguments dynamically +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + The ``__mapper_args__`` dictionary may be generated from a class-bound descriptor method rather than from a fixed dictionary by making use of the -:func:`_orm.declared_attr` construct. The section :ref:`orm_mixins_toplevel` -discusses this concept further. +:func:`_orm.declared_attr` construct. This is useful to create arguments +for mappers that are programmatically derived from the table configuration +or other aspects of the mapped class. A dynamic ``__mapper_args__`` +attribute will typically be useful when using a Declarative Mixin or +abstract base class. 
+ +For example, to omit from the mapping +any columns that have a special :attr:`.Column.info` value, a mixin +can use a ``__mapper_args__`` method that scans for these columns from the +``cls.__table__`` attribute and passes them to the :paramref:`_orm.mapper.exclude_properties` +collection:: + + from sqlalchemy import Column + from sqlalchemy import Integer + from sqlalchemy import select + from sqlalchemy import String + from sqlalchemy.orm import declarative_base + from sqlalchemy.orm import declared_attr + + + class ExcludeColsWFlag: + @declared_attr + def __mapper_args__(cls): + return { + "exclude_properties": [ + column.key for column in cls.__table__.c if + column.info.get("exclude", False) + ] + } + + Base = declarative_base() + + class SomeClass(ExcludeColsWFlag, Base): + __tablename__ = 'some_table' + + id = Column(Integer, primary_key=True) + data = Column(String) + not_needed = Column(String, info={"exclude": True}) + + +Above, the ``ExcludeColsWFlag`` mixin provides a per-class ``__mapper_args__`` +hook that will scan for :class:`.Column` objects that include the key/value +``'exclude': True`` passed to the :paramref:`.Column.info` parameter, and then +add their string "key" name to the :paramref:`_orm.mapper.exclude_properties` +collection which will prevent the resulting :class:`.Mapper` from considering +these columns for any SQL operations. .. seealso:: :ref:`orm_mixins_toplevel` + Other Declarative Mapping Directives -------------------------------------- diff --git a/doc/build/orm/declarative_mixins.rst b/doc/build/orm/declarative_mixins.rst index a5a85a791ad..1221616d504 100644 --- a/doc/build/orm/declarative_mixins.rst +++ b/doc/build/orm/declarative_mixins.rst @@ -14,6 +14,14 @@ usage of mixin classes, as well as via augmenting the declarative base produced by either the :meth:`_orm.registry.generate_base` method or :func:`_orm.declarative_base` functions. 
+When using mixins or abstract base classes with Declarative, a decorator +known as :func:`_orm.declared_attr` is frequently used. This decorator +allows the creation of class methods that produce a parameter or ORM construct that will be +part of a declarative mapping. Generating constructs using a callable +allows for Declarative to get a new copy of a particular kind of object +each time it calls upon the mixin or abstract base on behalf of a new +class that's being mapped. + An example of some commonly mixed-in idioms is below:: from sqlalchemy.orm import declarative_mixin, declared_attr @@ -37,7 +45,11 @@ An example of some commonly mixed-in idioms is below:: Where above, the class ``MyModel`` will contain an "id" column as the primary key, a ``__tablename__`` attribute that derives from the name of the class itself, as well as ``__table_args__`` -and ``__mapper_args__`` defined by the ``MyMixin`` mixin class. +and ``__mapper_args__`` defined by the ``MyMixin`` mixin class. The +:func:`_orm.declared_attr` decorator applied to a class method called +``def __tablename__(cls):`` has the effect of turning the method into a class +method while also indicating to Declarative that this attribute is significant +within the mapping. .. tip:: diff --git a/doc/build/orm/declarative_styles.rst b/doc/build/orm/declarative_styles.rst index dd72e9c1a0c..7a68d6fbb32 100644 --- a/doc/build/orm/declarative_styles.rst +++ b/doc/build/orm/declarative_styles.rst @@ -189,304 +189,3 @@ The decorator form of mapping is particularly useful when combining a SQLAlchemy declarative mapping with other forms of class declaration, notably the Python ``dataclasses`` module. See the next section. -.. 
_orm_declarative_dataclasses: - -Declarative Mapping with Dataclasses and Attrs ----------------------------------------------- - -The dataclasses_ module, added in Python 3.7, provides a ``@dataclass`` class -decorator to automatically generate boilerplate definitions of ``__init__()``, -``__eq__()``, ``__repr()__``, etc. methods. Another very popular library that does -the same, and much more, is attrs_, which uses the ``@define`` decorator. -Both libraries make use of class decorators in order to scan a class for -attributes that define the class' behavior, which are then used to generate -methods, documentation, and annotations. - -The :meth:`_orm.registry.mapped` class decorator allows the declarative mapping -of a class to occur after the class has been fully constructed, allowing the -class to be processed by other class decorators first. The ``@dataclass`` -and ``@define`` decorators may therefore be applied first before the -ORM mapping process proceeds via the :meth:`_orm.registry.mapped` decorator -or via the :meth:`_orm.registry.map_imperatively` method discussed in a -later section. - -Mapping with ``@dataclass`` or ``@define`` may be used in a straightforward -way with :ref:`orm_imperative_table_configuration` style, where the -the :class:`_schema.Table`, which means that it is defined separately and -associated with the class via the ``__table__``. For dataclasses specifically, -:ref:`orm_declarative_table` is also supported. - -.. versionadded:: 1.4.0b2 Added support for full declarative mapping when using - dataclasses. - -When attributes are defined using ``dataclasses``, the ``@dataclass`` -decorator consumes them but leaves them in place on the class. -SQLAlchemy's mapping process, when it encounters an attribute that normally -is to be mapped to a :class:`_schema.Column`, checks explicitly if the -attribute is part of a Dataclasses setup, and if so will **replace** -the class-bound dataclass attribute with its usual mapped -properties. 
The ``__init__`` method created by ``@dataclass`` is left -intact. The ``@define`` decorator of attrs_ by default replaces the annotated class -with a new __slots__ based class, which is not supported. When using the old -style annotation ``@attr.s`` or using ``define(slots=False)``, the class -does not get replaced. Furthermore attrs removes its own class-bound attributes -after the decorator runs, so that SQLAlchemy's mapping process takes over these -attributes without any issue. Both decorators, ``@attr.s`` and ``@define(slots=False)`` -work with SQLAlchemy. - -.. versionadded:: 1.4 Added support for direct mapping of Python dataclasses, - where the :class:`_orm.Mapper` will now detect attributes that are specific - to the ``@dataclasses`` module and replace them at mapping time, rather - than skipping them as is the default behavior for any class attribute - that's not part of the mapping. - -.. _orm_declarative_dataclasses_imperative_table: - -Example One - Dataclasses with Imperative Table -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -An example of a mapping using ``@dataclass`` using -:ref:`orm_imperative_table_configuration` is as follows:: - - from __future__ import annotations - - from dataclasses import dataclass, field - from typing import List, Optional - - from sqlalchemy import Column, ForeignKey, Integer, String, Table - from sqlalchemy.orm import registry, relationship - - mapper_registry = registry() - - - @mapper_registry.mapped - @dataclass - class User: - __table__ = Table( - "user", - mapper_registry.metadata, - Column("id", Integer, primary_key=True), - Column("name", String(50)), - Column("fullname", String(50)), - Column("nickname", String(12)), - ) - id: int = field(init=False) - name: Optional[str] = None - fullname: Optional[str] = None - nickname: Optional[str] = None - addresses: List[Address] = field(default_factory=list) - - __mapper_args__ = { # type: ignore - "properties": { - "addresses": relationship("Address"), - } - } - 
- - @mapper_registry.mapped - @dataclass - class Address: - __table__ = Table( - "address", - mapper_registry.metadata, - Column("id", Integer, primary_key=True), - Column("user_id", Integer, ForeignKey("user.id")), - Column("email_address", String(50)), - ) - id: int = field(init=False) - user_id: int = field(init=False) - email_address: Optional[str] = None - -In the above example, the ``User.id``, ``Address.id``, and ``Address.user_id`` -attributes are defined as ``field(init=False)``. This means that parameters for -these won't be added to ``__init__()`` methods, but -:class:`.Session` will still be able to set them after getting their values -during flush from autoincrement or other default value generator. To -allow them to be specified in the constructor explicitly, they would instead -be given a default value of ``None``. - -For a :func:`_orm.relationship` to be declared separately, it needs to be -specified directly within the :paramref:`_orm.Mapper.properties` dictionary -which itself is specified within the ``__mapper_args__`` dictionary, so that it -is passed to the constructor for :class:`_orm.Mapper`. An alternative to this -approach is in the next example. - -.. _orm_declarative_dataclasses_declarative_table: - -Example Two - Dataclasses with Declarative Table -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The fully declarative approach requires that :class:`_schema.Column` objects -are declared as class attributes, which when using dataclasses would conflict -with the dataclass-level attributes. An approach to combine these together -is to make use of the ``metadata`` attribute on the ``dataclass.field`` -object, where SQLAlchemy-specific mapping information may be supplied. -Declarative supports extraction of these parameters when the class -specifies the attribute ``__sa_dataclass_metadata_key__``. 
This also -provides a more succinct method of indicating the :func:`_orm.relationship` -association:: - - - from __future__ import annotations - - from dataclasses import dataclass, field - from typing import List - - from sqlalchemy import Column, ForeignKey, Integer, String - from sqlalchemy.orm import registry, relationship - - mapper_registry = registry() - - - @mapper_registry.mapped - @dataclass - class User: - __tablename__ = "user" - - __sa_dataclass_metadata_key__ = "sa" - id: int = field( - init=False, metadata={"sa": Column(Integer, primary_key=True)} - ) - name: str = field(default=None, metadata={"sa": Column(String(50))}) - fullname: str = field(default=None, metadata={"sa": Column(String(50))}) - nickname: str = field(default=None, metadata={"sa": Column(String(12))}) - addresses: List[Address] = field( - default_factory=list, metadata={"sa": relationship("Address")} - ) - - - @mapper_registry.mapped - @dataclass - class Address: - __tablename__ = "address" - __sa_dataclass_metadata_key__ = "sa" - id: int = field( - init=False, metadata={"sa": Column(Integer, primary_key=True)} - ) - user_id: int = field( - init=False, metadata={"sa": Column(ForeignKey("user.id"))} - ) - email_address: str = field( - default=None, metadata={"sa": Column(String(50))} - ) - -.. _orm_declarative_dataclasses_mixin: - -Using Declarative Mixins with Dataclasses -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -In the section :ref:`orm_mixins_toplevel`, Declarative Mixin classes -are introduced. 
One requirement of declarative mixins is that certain -constructs that can't be easily duplicated must be given as callables, -using the :class:`_orm.declared_attr` decorator, such as in the -example at :ref:`orm_declarative_mixins_relationships`:: - - class RefTargetMixin: - @declared_attr - def target_id(cls): - return Column("target_id", ForeignKey("target.id")) - - @declared_attr - def target(cls): - return relationship("Target") - -This form is supported within the Dataclasses ``field()`` object by using -a lambda to indicate the SQLAlchemy construct inside the ``field()``. -Using :func:`_orm.declared_attr` to surround the lambda is optional. -If we wanted to produce our ``User`` class above where the ORM fields -came from a mixin that is itself a dataclass, the form would be:: - - @dataclass - class UserMixin: - __tablename__ = "user" - - __sa_dataclass_metadata_key__ = "sa" - - id: int = field( - init=False, metadata={"sa": Column(Integer, primary_key=True)} - ) - - addresses: List[Address] = field( - default_factory=list, metadata={"sa": lambda: relationship("Address")} - ) - - - @dataclass - class AddressMixin: - __tablename__ = "address" - __sa_dataclass_metadata_key__ = "sa" - id: int = field( - init=False, metadata={"sa": Column(Integer, primary_key=True)} - ) - user_id: int = field( - init=False, metadata={"sa": lambda: Column(ForeignKey("user.id"))} - ) - email_address: str = field( - default=None, metadata={"sa": Column(String(50))} - ) - - - @mapper_registry.mapped - class User(UserMixin): - pass - - - @mapper_registry.mapped - class Address(AddressMixin): - pass - -.. versionadded:: 1.4.2 Added support for "declared attr" style mixin attributes, - namely :func:`_orm.relationship` constructs as well as :class:`_schema.Column` - objects with foreign key declarations, to be used within "Dataclasses - with Declarative Table" style mappings. - -.. 
_orm_declarative_attrs_imperative_table: - -Example Three - attrs with Imperative Table -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -A mapping using ``@define`` from attrs_, in conjunction with imperative table:: - - import attr - from sqlalchemy.orm import registry - - # other imports - - - mapper_registry = registry() - - - @mapper_registry.mapped - @define(slots=False) - class User: - __table__ = Table( - "user", - mapper_registry.metadata, - Column("id", Integer, primary_key=True), - Column("name", String(50)), - Column("fullname", String(50)), - Column("nickname", String(12)), - ) - id: int - name: str - fullname: str - nickname: str - addresses: List[Address] - - - # other classes... - - -``@dataclass`` and attrs_ mappings may also be used with classical mappings, i.e. -with the :meth:`_orm.registry.map_imperatively` function. See the section -:ref:`orm_imperative_dataclasses` for a similar example. - -.. note:: The ``attrs`` ``slots=True`` option, which enables ``__slots__`` on - a mapped class, cannot be used with SQLAlchemy mappings without fully - implementing alternative - :ref:`attribute instrumentation `, as mapped - classes normally rely upon direct access to ``__dict__`` for state storage. - Behavior is undefined when this option is present. - -.. _dataclasses: https://docs.python.org/3/library/dataclasses.html -.. _attrs: https://pypi.org/project/attrs/ diff --git a/doc/build/orm/declarative_tables.rst b/doc/build/orm/declarative_tables.rst index 72a48078d24..b8804a21d58 100644 --- a/doc/build/orm/declarative_tables.rst +++ b/doc/build/orm/declarative_tables.rst @@ -64,6 +64,11 @@ to produce a :class:`_schema.Table` that is equivalent to:: Column("nickname", String), ) +.. seealso:: + + :ref:`mapping_columns_toplevel` - contains additional notes on affecting + how :class:`_orm.Mapper` interprets incoming :class:`.Column` objects. + .. 
_orm_declarative_metadata:

Accessing Table and Metadata
@@ -183,36 +188,26 @@ or :func:`_orm.declarative_base`::

 .. _orm_declarative_table_adding_columns:

-Adding New Columns
-^^^^^^^^^^^^^^^^^^^
-
-The declarative table configuration allows the addition of new
-:class:`_schema.Column` objects under two scenarios.  The most basic
-is that of simply assigning new :class:`_schema.Column` objects to the
-class::
-
-    MyClass.some_new_column = Column("data", Unicode)
-
-The above operation performed against a declarative class that has been
-mapped using the declarative base (note, not the decorator form of declarative)
-will add the above :class:`_schema.Column` to the :class:`_schema.Table`
-using the :meth:`_schema.Table.append_column` method and will also add the
-column to the :class:`_orm.Mapper` to be fully mapped.
+Appending additional columns to an existing Declarative mapped class
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-.. note:: assignment of new columns to an existing declaratively mapped class
-   will only function correctly if the "declarative base" class is used, which
-   also provides for a metaclass-driven ``__setattr__()`` method which will
-   intercept these operations.  It will **not** work if the declarative
-   decorator provided by
-   :meth:`_orm.registry.mapped` is used, nor will it work for an imperatively
-   mapped class mapped by :meth:`_orm.registry.map_imperatively`.
+A declarative table configuration allows the addition of new
+:class:`_schema.Column` objects to an existing mapping after the :class:`.Table`
+metadata has already been generated.
+For a declarative class that is declared using a declarative base class, +the underlying metaclass :class:`.DeclarativeMeta` includes a ``__setattr__()`` +method that will intercept additional :class:`.Column` objects and +add them to both the :class:`.Table` using :meth:`.Table.append_column` +as well as to the existing :class:`.Mapper` using :meth:`.Mapper.add_property`:: -The other scenario where a :class:`_schema.Column` is added on the fly is -when an inheriting subclass that has no table of its own indicates -additional columns; these columns will be added to the superclass table. -The section :ref:`single_inheritance` discusses single table inheritance. + MyClass.some_new_column = Column("data", Unicode) +Additional :class:`_schema.Column` objects may also be added to a mapping +in the specific circumstance of using single table inheritance, where +additional columns are present on mapped subclasses that have +no :class:`.Table` of their own. This is illustrated in the section +:ref:`single_inheritance`. .. 
_orm_imperative_table_configuration: @@ -344,10 +339,15 @@ use a declarative hybrid mapping, passing the :paramref:`_schema.Table.autoload_with` parameter to the :class:`_schema.Table`:: + from sqlalchemy import create_engine + from sqlalchemy import Table + from sqlalchemy.orm import declarative_base + engine = create_engine( "postgresql+psycopg2://user:pass@hostname/my_existing_database" ) + Base = declarative_base() class MyClass(Base): __table__ = Table( @@ -356,17 +356,47 @@ use a declarative hybrid mapping, passing the autoload_with=engine, ) -A major downside of the above approach however is that it requires the database +A variant on the above pattern that scales much better is to use the +:meth:`.MetaData.reflect` method to reflect a full set of :class:`.Table` +objects at once, then refer to them from the :class:`.MetaData`:: + + + from sqlalchemy import create_engine + from sqlalchemy import Table + from sqlalchemy.orm import declarative_base + + engine = create_engine( + "postgresql+psycopg2://user:pass@hostname/my_existing_database" + ) + + Base = declarative_base() + + Base.metadata.reflect(engine) + + class MyClass(Base): + __table__ = Base.metadata.tables['mytable'] + +.. seealso:: + + :ref:`mapper_automated_reflection_schemes` - further notes on using + table reflection with mapped classes + +A major downside to the above approach is that the mapped classes cannot +be declared until the tables have been reflected, which requires the database connectivity source to be present while the application classes are being declared; it's typical that classes are declared as the modules of an application are being imported, but database connectivity isn't available until the application starts running code so that it can consume configuration -information and create an engine. +information and create an engine. There are currently two approaches +to working around this. + +.. 
_orm_declarative_reflected_deferred_reflection: Using DeferredReflection ^^^^^^^^^^^^^^^^^^^^^^^^^ -To accommodate this case, a simple extension called the +To accommodate the use case of declaring mapped classes where reflection of +table metadata can occur afterwards, a simple extension called the :class:`.DeferredReflection` mixin is available, which alters the declarative mapping process to be delayed until a special class-level :meth:`.DeferredReflection.prepare` method is called, which will perform @@ -408,17 +438,22 @@ complete until we do so, given an :class:`_engine.Engine`:: The purpose of the ``Reflected`` class is to define the scope at which classes should be reflectively mapped. The plugin will search among the subclass tree of the target against which ``.prepare()`` is called and reflect -all tables. +all tables which are named by declared classes; tables in the target database +that are not part of mappings and are not related to the target tables +via foreign key constraint will not be reflected. Using Automap ^^^^^^^^^^^^^^ -A more automated solution to mapping against an existing database where -table reflection is to be used is to use the :ref:`automap_toplevel` -extension. This extension will generate entire mapped classes from a -database schema, and allows several hooks for customization including the -ability to explicitly map some or all classes while still making use of -reflection to fill in the remaining columns. +A more automated solution to mapping against an existing database where table +reflection is to be used is to use the :ref:`automap_toplevel` extension. This +extension will generate entire mapped classes from a database schema, including +relationships between classes based on observed foreign key constraints. While +it includes hooks for customization, such as hooks that allow custom +class naming and relationship naming schemes, automap is oriented towards an +expedient zero-configuration style of working. 
If an application wishes to have +a fully explicit model that makes use of table reflection, the +:ref:`orm_declarative_reflected_deferred_reflection` may be preferable. .. seealso:: diff --git a/doc/build/orm/inheritance.rst b/doc/build/orm/inheritance.rst index d179864ebc5..e9b1998ee5b 100644 --- a/doc/build/orm/inheritance.rst +++ b/doc/build/orm/inheritance.rst @@ -995,7 +995,7 @@ that we omit the ``employee`` table. .. seealso:: - :ref:`classical_mapping` - background information on "classical" mappings + :ref:`orm_imperative_mapping` - background information on imperative, or "classical" mappings diff --git a/doc/build/orm/mapper_config.rst b/doc/build/orm/mapper_config.rst index 4de08690329..13d2ce860db 100644 --- a/doc/build/orm/mapper_config.rst +++ b/doc/build/orm/mapper_config.rst @@ -1,13 +1,20 @@ .. _mapper_config_toplevel: -==================== -Mapper Configuration -==================== +=============================== +ORM Mapped Class Configuration +=============================== -This section describes a variety of configurational patterns that are usable -with mappers. It assumes you've worked through :ref:`ormtutorial_toplevel` and -know how to construct and use rudimentary mappers and relationships. +Detailed reference for ORM configuration, not including +relationships, which are detailed at +:ref:`relationship_config_toplevel`. + +For a quick look at a typical ORM configuration, start with +:ref:`orm_quickstart`. + +For an introduction to the concept of object relational mapping as implemented +in SQLAlchemy, it's first introduced in the :ref:`unified_tutorial` at +:ref:`tutorial_orm_table_metadata`. .. toctree:: @@ -15,6 +22,7 @@ know how to construct and use rudimentary mappers and relationships. 
mapping_styles declarative_mapping + dataclasses scalar_mapping inheritance nonstandard_mappings diff --git a/doc/build/orm/mapping_columns.rst b/doc/build/orm/mapping_columns.rst index 596c64f7c5a..788d5776ef9 100644 --- a/doc/build/orm/mapping_columns.rst +++ b/doc/build/orm/mapping_columns.rst @@ -5,11 +5,30 @@ Mapping Table Columns ===================== -The default behavior of :func:`_orm.mapper` is to assemble all the columns in -the mapped :class:`_schema.Table` into mapped object attributes, each of which are -named according to the name of the column itself (specifically, the ``key`` -attribute of :class:`_schema.Column`). This behavior can be -modified in several ways. +Introductory background on mapping to columns falls under the subject of +:class:`.Table` configuration; the general form falls under one of three +forms: + +* :ref:`orm_declarative_table` - :class:`.Column` objects are associated with a + :class:`.Table` as well as with an ORM mapping in one step by declaring + them inline as class attributes. +* :ref:`orm_imperative_table_configuration` - :class:`.Column` objects are + associated directly with their :class:`.Table` object, as detailed at + :ref:`metadata_describing_toplevel`; the columns are then mapped by + the Declarative process by associating the :class:`.Table` with the + class to be mapped via the ``__table__`` attribute. +* :ref:`orm_imperative_mapping` - like "Imperative Table", :class:`.Column` + objects are associated directly with their :class:`.Table` object; the + columns are then mapped by the Imperative process using + :meth:`_orm.registry.map_imperatively`. + +In all of the above cases, the :class:`_orm.mapper` constructor is ultimately +invoked with a completed :class:`.Table` object passed as the selectable unit +to be mapped. 
The behavior of :class:`_orm.mapper` then is to assemble all the +columns in the mapped :class:`_schema.Table` into mapped object attributes, +each of which are named according to the name of the column itself +(specifically, the ``key`` attribute of :class:`_schema.Column`). This behavior +can be modified in several ways. .. _mapper_column_distinct_names: @@ -51,7 +70,6 @@ dictionary with the desired key:: 'name': user_table.c.user_name, }) -In the next section we'll examine the usage of ``.key`` more closely. .. _mapper_automated_reflection_schemes: @@ -62,7 +80,7 @@ In the previous section :ref:`mapper_column_distinct_names`, we showed how a :class:`_schema.Column` explicitly mapped to a class can have a different attribute name than the column. But what if we aren't listing out :class:`_schema.Column` objects explicitly, and instead are automating the production of :class:`_schema.Table` -objects using reflection (e.g. as described in :ref:`metadata_reflection_toplevel`)? +objects using reflection (i.e. as described in :ref:`metadata_reflection_toplevel`)? In this case we can make use of the :meth:`_events.DDLEvents.column_reflect` event to intercept the production of :class:`_schema.Column` objects and provide them with the :attr:`_schema.Column.key` of our choice. The event is most easily @@ -82,33 +100,19 @@ with our event that adds a new ".key" element, such as in a mapping as below:: __table__ = Table("some_table", Base.metadata, autoload_with=some_engine) -The approach also works with the :ref:`automap_toplevel` extension. See -the section :ref:`automap_intercepting_columns` for background. +The approach also works with both the :class:`.DeferredReflection` base class +as well as with the :ref:`automap_toplevel` extension. For automap +specifically, see the section :ref:`automap_intercepting_columns` for +background. .. 
seealso:: + :ref:`orm_declarative_reflected` + :meth:`_events.DDLEvents.column_reflect` :ref:`automap_intercepting_columns` - in the :ref:`automap_toplevel` documentation -.. _column_prefix: - -Naming All Columns with a Prefix --------------------------------- - -A quick approach to prefix column names, typically when mapping -to an existing :class:`_schema.Table` object, is to use ``column_prefix``:: - - class User(Base): - __table__ = user_table - __mapper_args__ = {'column_prefix':'_'} - -The above will place attribute names such as ``_user_id``, ``_user_name``, -``_password`` etc. on the mapped ``User`` class. - -This approach is uncommon in modern usage. For dealing with reflected -tables, a more flexible approach is to use that described in -:ref:`mapper_automated_reflection_schemes`. .. _column_property_options: @@ -162,6 +166,75 @@ See examples of this usage at :ref:`mapper_sql_expressions`. .. autofunction:: column_property +.. _mapper_primary_key: + +Mapping to an Explicit Set of Primary Key Columns +------------------------------------------------- + +The :class:`.Mapper` construct in order to successfully map a table always +requires that at least one column be identified as the "primary key" for +that selectable. This is so that when an ORM object is loaded or persisted, +it can be placed in the :term:`identity map` with an appropriate +:term:`identity key`. + +To support this use case, all :class:`.FromClause` objects (where +:class:`.FromClause` is the common base for objects such as :class:`.Table`, +:class:`.Join`, :class:`.Subquery`, etc.) have an attribute +:attr:`.FromClause.primary_key` which returns a collection of those +:class:`.Column` objects that indicate they are part of a "primary key", +which is derived from each :class:`.Column` object being a member of a +:class:`.PrimaryKeyConstraint` collection that's associated with the +:class:`.Table` from which they ultimately derive. 
+
+In those cases where the selectable being mapped does not include columns
+that are explicitly part of the primary key constraint on their parent table,
+a user-defined set of primary key columns must be defined. The
+:paramref:`.mapper.primary_key` parameter is used for this purpose.
+
+Given the following example of an :ref:`Imperative Table <orm_imperative_table_configuration>`
+mapping against an existing :class:`.Table` object, as would occur in a scenario
+such as when the :class:`.Table` were :term:`reflected` from an existing
+database, where the table does not have any declared primary key, we may
+map such a table as in the following example::
+
+    from sqlalchemy import Column
+    from sqlalchemy import MetaData
+    from sqlalchemy import String
+    from sqlalchemy import Table
+    from sqlalchemy import UniqueConstraint
+    from sqlalchemy.orm import declarative_base
+
+
+    metadata = MetaData()
+    group_users = Table(
+        "group_users",
+        metadata,
+        Column("user_id", String(40), nullable=False),
+        Column("group_id", String(40), nullable=False),
+        UniqueConstraint("user_id", "group_id")
+    )
+
+
+    Base = declarative_base()
+
+
+    class GroupUsers(Base):
+        __table__ = group_users
+        __mapper_args__ = {
+            "primary_key": [group_users.c.user_id, group_users.c.group_id]
+        }
+
+Above, the ``group_users`` table is an association table of some kind
+with string columns ``user_id`` and ``group_id``, but no primary key is set up;
+instead, there is only a :class:`.UniqueConstraint` establishing that the
+two columns represent a unique key. The :class:`.Mapper` does not automatically
+inspect unique constraints for primary keys; instead, we make use of the
+:paramref:`.mapper.primary_key` parameter, passing a collection of
+``[group_users.c.user_id, group_users.c.group_id]``, indicating that these two
+columns should be used in order to construct the identity key for instances
+of the ``GroupUsers`` class.
+
+
.. 
_include_exclude_cols: Mapping a Subset of Table Columns diff --git a/doc/build/orm/mapping_styles.rst b/doc/build/orm/mapping_styles.rst index 7c7817aec4d..edacadf71d1 100644 --- a/doc/build/orm/mapping_styles.rst +++ b/doc/build/orm/mapping_styles.rst @@ -1,51 +1,61 @@ .. _orm_mapping_classes_toplevel: -======================= -Mapping Python Classes -======================= +========================== +ORM Mapped Class Overview +========================== + +Overview of ORM class mapping configuration. + +For readers new to the SQLAlchemy ORM and/or new to Python in general, +it's recommended to browse through the +:ref:`orm_quickstart` and preferably to work through the +:ref:`unified_tutorial`, where ORM configuration is first introduced at +:ref:`tutorial_orm_table_metadata`. + + +ORM Mapping Styles +================== + +SQLAlchemy features two distinct styles of mapper configuration, which then +feature further sub-options for how they are set up. The variability in mapper +styles is present to suit a varied list of developer preferences, including +the degree of abstraction of a user-defined class from how it is to be +mapped to relational schema tables and columns, what kinds of class hierarchies +are in use, including whether or not custom metaclass schemes are present, +and finally if there are other class-instrumentation approaches present such +as if Python dataclasses_ are in use simultaneously. 
+ +In modern SQLAlchemy, the difference between these styles is mostly +superficial; when a particular SQLAlchemy configurational style is used to +express the intent to map a class, the internal process of mapping the class +proceeds in mostly the same way for each, where the end result is always a +user-defined class that has a :class:`_orm.Mapper` configured against a +selectable unit, typically represented by a :class:`_schema.Table` object, and +the class itself has been :term:`instrumented` to include behaviors linked to +relational operations both at the level of the class as well as on instances of +that class. As the process is basically the same in all cases, classes mapped +from different styles are always fully interoperable with each other. -SQLAlchemy historically features two distinct styles of mapper configuration. The original mapping API is commonly referred to as "classical" style, whereas the more automated style of mapping is known as "declarative" style. SQLAlchemy now refers to these two mapping styles as **imperative mapping** and **declarative mapping**. -Both styles may be used interchangeably, as the end result of each is exactly -the same - a user-defined class that has a :class:`_orm.Mapper` configured -against a selectable unit, typically represented by a :class:`_schema.Table` -object. - -Both imperative and declarative mapping begin with an ORM :class:`_orm.registry` -object, which maintains a set of classes that are mapped. This registry -is present for all mappings. +Regardless of what style of mapping is used, all ORM mappings as of SQLAlchemy 1.4 +originate from a single object known as :class:`_orm.registry`, which is a +registry of mapped classes. Using this registry, a set of mapper configurations +can be finalized as a group, and classes within a particular registry may refer +to each other by name within the configurational process. .. 
versionchanged:: 1.4 Declarative and classical mapping are now referred to as "declarative" and "imperative" mapping, and are unified internally, all originating from the :class:`_orm.registry` construct that represents a collection of related mappings. -The full suite of styles can be hierarchically organized as follows: - -* :ref:`orm_declarative_mapping` - * Using :func:`_orm.declarative_base` Base class w/ metaclass - * :ref:`orm_declarative_table` - * :ref:`Imperative Table (a.k.a. "hybrid table") ` - * Using :meth:`_orm.registry.mapped` Declarative Decorator - * :ref:`Declarative Table ` - combine :meth:`_orm.registry.mapped` - with ``__tablename__`` - * Imperative Table (Hybrid) - combine :meth:`_orm.registry.mapped` with ``__table__`` - * :ref:`orm_declarative_dataclasses` - * :ref:`orm_declarative_dataclasses_imperative_table` - * :ref:`orm_declarative_dataclasses_declarative_table` - * :ref:`orm_declarative_attrs_imperative_table` -* :ref:`Imperative (a.k.a. "classical" mapping) ` - * Using :meth:`_orm.registry.map_imperatively` - * :ref:`orm_imperative_dataclasses` - .. _orm_declarative_mapping: Declarative Mapping -=================== +------------------- The **Declarative Mapping** is the typical way that mappings are constructed in modern SQLAlchemy. The most common pattern @@ -73,11 +83,10 @@ Above, the :func:`_orm.declarative_base` callable returns a new base class from which new classes to be mapped may inherit from, as above a new mapped class ``User`` is constructed. -The base class refers to a -:class:`_orm.registry` object that maintains a collection of related mapped -classes. The :func:`_orm.declarative_base` function is in fact shorthand -for first creating the registry with the :class:`_orm.registry` -constructor, and then generating a base class using the +The base class refers to a :class:`_orm.registry` object that maintains a +collection of related mapped classes. 
The :func:`_orm.declarative_base` +function is in fact shorthand for first creating the registry with the +:class:`_orm.registry` constructor, and then generating a base class using the :meth:`_orm.registry.generate_base` method:: from sqlalchemy.orm import registry @@ -87,9 +96,7 @@ constructor, and then generating a base class using the mapper_registry = registry() Base = mapper_registry.generate_base() -The :class:`_orm.registry` is used directly in order to access a variety -of mapping styles to suit different use cases. The primary mapping styles -offered by :class:`_orm.registry` are further detailed in the following +The major Declarative mapping styles are further detailed in the following sections: * :ref:`orm_declarative_generated_base_class` - declarative mapping using a @@ -98,25 +105,25 @@ sections: * :ref:`orm_declarative_decorator` - declarative mapping using a decorator, rather than a base class. -* :ref:`orm_imperative_mapping` - imperative mapping, specifying all mapping - arguments directly rather than scanning a class. +Within the scope of a Declarative mapped class, there are also two varieties +of how the :class:`_schema.Table` metadata may be declared. These include: -Documentation for Declarative mapping continues at :ref:`declarative_config_toplevel`. +* :ref:`orm_declarative_table` - individual :class:`_schema.Column` definitions + are combined with a table name and additional arguments, where the Declarative + mapping process will construct a :class:`_schema.Table` object to be mapped. -.. seealso:: +* :ref:`orm_imperative_table_configuration` - Instead of specifying table name + and attributes separately, an explicitly constructed :class:`_schema.Table` object + is associated with a class that is otherwise mapped declaratively. This + style of mapping is a hybrid of "declarative" and "imperative" mapping. 
- * :ref:`declarative_config_toplevel` - - * :ref:`orm_declarative_styles_toplevel` - * :ref:`orm_declarative_table_config_toplevel` - * :ref:`orm_declarative_mapper_config_toplevel` - -.. _orm_imperative_mapping: +Documentation for Declarative mapping continues at :ref:`declarative_config_toplevel`. .. _classical_mapping: +.. _orm_imperative_mapping: -Imperative (a.k.a. Classical) Mappings -====================================== +Imperative Mapping +------------------- An **imperative** or **classical** mapping refers to the configuration of a mapped class using the :meth:`_orm.registry.map_imperatively` method, @@ -186,83 +193,10 @@ user-defined class, linked together with a :func:`.mapper`. When we talk about as well - it's still used, just behind the scenes. - - -.. _orm_imperative_dataclasses: - -Imperative Mapping with Dataclasses and Attrs ---------------------------------------------- - -As described in the section :ref:`orm_declarative_dataclasses`, the -``@dataclass`` decorator and the ``attrs`` library both work as class -decorators that are applied to a class first, before it is passed to -SQLAlchemy for mapping. 
Just like we can use the -:meth:`_orm.registry.mapped` decorator in order to apply declarative-style -mapping to the class, we can also pass it to the :meth:`_orm.registry.map_imperatively` -method so that we may pass all :class:`_schema.Table` and :class:`_orm.Mapper` -configuration imperatively to the function rather than having them defined -on the class itself as declarative class variables:: - - from __future__ import annotations - - from dataclasses import dataclass - from dataclasses import field - from typing import List - - from sqlalchemy import Column - from sqlalchemy import ForeignKey - from sqlalchemy import Integer - from sqlalchemy import MetaData - from sqlalchemy import String - from sqlalchemy import Table - from sqlalchemy.orm import registry - from sqlalchemy.orm import relationship - - mapper_registry = registry() - - @dataclass - class User: - id: int = field(init=False) - name: str = None - fullname: str = None - nickname: str = None - addresses: List[Address] = field(default_factory=list) - - @dataclass - class Address: - id: int = field(init=False) - user_id: int = field(init=False) - email_address: str = None - - metadata_obj = MetaData() - - user = Table( - 'user', - metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String(50)), - Column('fullname', String(50)), - Column('nickname', String(12)), - ) - - address = Table( - 'address', - metadata_obj, - Column('id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey('user.id')), - Column('email_address', String(50)), - ) - - mapper_registry.map_imperatively(User, user, properties={ - 'addresses': relationship(Address, backref='user', order_by=address.c.id), - }) - - mapper_registry.map_imperatively(Address, address) - .. 
 _orm_mapper_configuration_overview: -Mapper Configuration Overview -============================= +Mapped Class Essential Components +================================== With all mapping forms, the mapping of the class can be configured in many ways by passing construction arguments that become @@ -344,27 +278,17 @@ to :meth:`_orm.registry.map_imperatively`, which will pass it along to the Other mapper configuration parameters ------------------------------------- -These flags are documented at :func:`_orm.mapper`. - When mapping with the :ref:`declarative ` mapping style, additional mapper configuration arguments are configured via the -``__mapper_args__`` class attribute, documented at -:ref:`orm_declarative_mapper_options` +``__mapper_args__`` class attribute. Examples of use are available +at :ref:`orm_declarative_mapper_options`. When mapping with the :ref:`imperative ` style, keyword arguments are passed to the :meth:`_orm.registry.map_imperatively` method which passes them along to the :func:`_orm.mapper` function. +The full range of parameters accepted is documented at :class:`_orm.mapper`. -.. [1] When running under Python 2, a Python 2 "old style" class is the only - kind of class that isn't compatible. When running code on Python 2, - all classes must extend from the Python ``object`` class. Under - Python 3 this is always the case. - -.. [2] There is a legacy feature known as a "non primary mapper", where - additional :class:`_orm.Mapper` objects may be associated with a class - that's already mapped, however they don't apply instrumentation - to the class. This feature is deprecated as of SQLAlchemy 1.3.
Mapped Class Behavior @@ -422,15 +346,17 @@ The constructor also applies to imperative mappings:: mapper_registry.map_imperatively(User, user_table) -The above class, mapped imperatively as described at :ref:`classical_mapping`, +The above class, mapped imperatively as described at :ref:`orm_imperative_mapping`, will also feature the default constructor associated with the :class:`_orm.registry`. .. versionadded:: 1.4 classical mappings now support a standard configuration-level constructor when they are mapped via the :meth:`_orm.registry.map_imperatively` method. -Runtime Introspection of Mapped classes and Mappers ---------------------------------------------------- +.. _orm_mapper_inspection: + +Runtime Introspection of Mapped classes, Instances and Mappers +--------------------------------------------------------------- A class that is mapped using :class:`_orm.registry` will also feature a few attributes that are common to all mappings: @@ -450,12 +376,12 @@ attributes that are common to all mappings: .. * The ``__table__`` attribute will refer to the :class:`_schema.Table`, or - more generically to the :class:`_schema.FromClause` object, to which the + more generically to the :class:`.FromClause` object, to which the class is mapped:: table = User.__table__ - This :class:`_schema.FromClause` is also what's returned when using the + This :class:`.FromClause` is also what's returned when using the :attr:`_orm.Mapper.local_table` attribute of the :class:`_orm.Mapper`:: table = inspect(User).local_table @@ -470,8 +396,10 @@ attributes that are common to all mappings: .. -Mapper Inspection Features --------------------------- +.. _orm_mapper_inspection_mapper: + +Inspection of Mapper objects +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As illustrated in the previous section, the :class:`_orm.Mapper` object is available from any mapped class, regardless of method, using the @@ -514,8 +442,90 @@ As well as :attr:`_orm.Mapper.column_attrs`:: .. 
seealso:: - :ref:`core_inspection_toplevel` + :class:`.Mapper` + +.. _orm_mapper_inspection_instancestate: + +Inspection of Mapped Instances +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The :func:`_sa.inspect` function also provides information about instances +of a mapped class. When applied to an instance of a mapped class, rather +than the class itself, the object returned is known as :class:`.InstanceState`, +which will provide links to not only the :class:`.Mapper` in use by the +class, but also a detailed interface that provides information on the state +of individual attributes within the instance including their current value +and how this relates to what their database-loaded value is. + +Given an instance of the ``User`` class loaded from the database:: + + >>> u1 = session.scalars(select(User)).first() + +The :func:`_sa.inspect` function will return to us an :class:`.InstanceState` +object:: + + >>> insp = inspect(u1) + >>> insp + + +With this object we can see elements such as the :class:`.Mapper`:: + + >>> insp.mapper + + +The :class:`_orm.Session` to which the object is :term:`attached`, if any:: + + >>> insp.session + + +Information about the current :ref:`persistence state ` +for the object:: + + >>> insp.persistent + True + >>> insp.pending + False - :class:`_orm.Mapper` +Attribute state information such as attributes that have not been loaded or +:term:`lazy loaded` (assume ``addresses`` refers to a :func:`_orm.relationship` +on the mapped class to a related class):: + + >>> insp.unloaded + {'addresses'} + +Information regarding the current in-Python status of attributes, such as +attributes that have not been modified since the last flush:: + + >>> insp.unmodified + {'nickname', 'name', 'fullname', 'id'} + +as well as specific history on modifications to attributes since the last flush:: + + >>> insp.attrs.nickname.value + 'nickname' + >>> u1.nickname = 'new nickname' + >>> insp.attrs.nickname.history + History(added=['new nickname'], unchanged=(), 
deleted=['nickname']) + +.. seealso:: :class:`.InstanceState` + + :attr:`.InstanceState.attrs` + + :class:`.AttributeState` + + +.. _dataclasses: https://docs.python.org/3/library/dataclasses.html +.. _attrs: https://pypi.org/project/attrs/ + +.. [1] When running under Python 2, a Python 2 "old style" class is the only + kind of class that isn't compatible. When running code on Python 2, + all classes must extend from the Python ``object`` class. Under + Python 3 this is always the case. + +.. [2] There is a legacy feature known as a "non primary mapper", where + additional :class:`_orm.Mapper` objects may be associated with a class + that's already mapped, however they don't apply instrumentation + to the class. This feature is deprecated as of SQLAlchemy 1.3. + diff --git a/doc/build/orm/session_state_management.rst b/doc/build/orm/session_state_management.rst index 47b4fbe7fd8..31e82ab62cb 100644 --- a/doc/build/orm/session_state_management.rst +++ b/doc/build/orm/session_state_management.rst @@ -50,7 +50,19 @@ Getting the Current State of an Object ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The actual state of any mapped object can be viewed at any time using -the :func:`_sa.inspect` system:: +the :func:`_sa.inspect` function on a mapped instance; this function will +return the corresponding :class:`.InstanceState` object which manages the +internal ORM state for the object. :class:`.InstanceState` provides, among +other accessors, boolean attributes indicating the persistence state +of the object, including: + +* :attr:`.InstanceState.transient` +* :attr:`.InstanceState.pending` +* :attr:`.InstanceState.persistent` +* :attr:`.InstanceState.deleted` +* :attr:`.InstanceState.detached` + +E.g.:: >>> from sqlalchemy import inspect >>> insp = inspect(my_object) @@ -59,15 +71,8 @@ the :func:`_sa.inspect` system:: .. 
seealso:: - :attr:`.InstanceState.transient` - - :attr:`.InstanceState.pending` - - :attr:`.InstanceState.persistent` - - :attr:`.InstanceState.deleted` - - :attr:`.InstanceState.detached` + :ref:`orm_mapper_inspection_instancestate` - further examples of + :class:`.InstanceState` .. _session_attributes: diff --git a/lib/sqlalchemy/ext/automap.py b/lib/sqlalchemy/ext/automap.py index 9502b09e807..a5d7267c211 100644 --- a/lib/sqlalchemy/ext/automap.py +++ b/lib/sqlalchemy/ext/automap.py @@ -9,8 +9,6 @@ which automatically generates mapped classes and relationships from a database schema, typically though not necessarily one which is reflected. -.. versionadded:: 0.9.1 Added :mod:`sqlalchemy.ext.automap`. - It is hoped that the :class:`.AutomapBase` system provides a quick and modernized solution to the problem that the very famous `SQLSoup `_ @@ -21,6 +19,15 @@ a well-integrated approach to the issue of expediently auto-generating ad-hoc mappings. +.. tip:: The :ref:`automap_toplevel` extension is geared towards a + "zero declaration" approach, where a complete ORM model including classes + and pre-named relationships can be generated on the fly from a database + schema. For applications that still want to use explicit class declarations + including explicit relationship definitions in conjunction with reflection + of tables, the :class:`.DeferredReflection` class, described at + :ref:`orm_declarative_reflected_deferred_reflection`, is a better choice. + + Basic Use ========= @@ -121,6 +128,9 @@ Specifying Classes Explicitly ============================= +.. tip:: If explicit classes are expected to be prominent in an application, + consider using :class:`.DeferredReflection` instead. + The :mod:`.sqlalchemy.ext.automap` extension allows classes to be defined explicitly, in a way similar to that of the :class:`.DeferredReflection` class. 
Classes that extend from :class:`.AutomapBase` act like regular declarative diff --git a/lib/sqlalchemy/ext/declarative/extensions.py b/lib/sqlalchemy/ext/declarative/extensions.py index 6286091b1d5..78188416aa3 100644 --- a/lib/sqlalchemy/ext/declarative/extensions.py +++ b/lib/sqlalchemy/ext/declarative/extensions.py @@ -380,6 +380,11 @@ class YetAnotherClass(ReflectedTwo): ReflectedOne.prepare(engine_one) ReflectedTwo.prepare(engine_two) + .. seealso:: + + :ref:`orm_declarative_reflected_deferred_reflection` - in the + :ref:`orm_declarative_table_config_toplevel` section. + """ @classmethod diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index 21809e7e6ea..ed221a964a0 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -205,10 +205,29 @@ class will overwrite all data within object instances that already :param column_prefix: A string which will be prepended to the mapped attribute name when :class:`_schema.Column` objects are automatically assigned as attributes to the - mapped class. Does not affect explicitly specified - column-based properties. - - See the section :ref:`column_prefix` for an example. + mapped class. Does not affect :class:`.Column` objects that + are mapped explicitly in the :paramref:`.mapper.properties` + dictionary. + + This parameter is typically useful with imperative mappings + that keep the :class:`.Table` object separate. Below, assuming + the ``user_table`` :class:`.Table` object has columns named + ``user_id``, ``user_name``, and ``password``:: + + class User(Base): + __table__ = user_table + __mapper_args__ = {'column_prefix':'_'} + + The above mapping will assign the ``user_id``, ``user_name``, and + ``password`` columns to attributes named ``_user_id``, + ``_user_name``, and ``_password`` on the mapped ``User`` class. + + The :paramref:`.mapper.column_prefix` parameter is uncommon in + modern use. 
For dealing with reflected tables, a more flexible + approach to automating a naming scheme is to intercept the + :class:`.Column` objects as they are reflected; see the section + :ref:`mapper_automated_reflection_schemes` for notes on this usage + pattern. :param concrete: If True, indicates this mapper should use concrete table inheritance with its parent mapper. @@ -496,12 +515,21 @@ def set_identity(instance, *arg, **kw): based on all those :class:`.MapperProperty` instances declared in the declared class body. + .. seealso:: + + :ref:`orm_mapping_properties` - in the + :ref:`orm_mapping_classes_toplevel` + :param primary_key: A list of :class:`_schema.Column` objects which define the primary key to be used against this mapper's selectable unit. This is normally simply the primary key of the ``local_table``, but can be overridden here. + .. seealso:: + + :ref:`mapper_primary_key` - background and example use + :param version_id_col: A :class:`_schema.Column` that will be used to keep a running version id of rows in the table. This is used to detect concurrent updates or diff --git a/lib/sqlalchemy/orm/state.py b/lib/sqlalchemy/orm/state.py index 3bb59277983..9718024292f 100644 --- a/lib/sqlalchemy/orm/state.py +++ b/lib/sqlalchemy/orm/state.py @@ -58,10 +58,12 @@ class InstanceState(interfaces.InspectionAttrInfo): >>> from sqlalchemy import inspect >>> insp = inspect(some_mapped_object) + >>> insp.attrs.nickname.history + History(added=['new nickname'], unchanged=(), deleted=['nickname']) .. 
seealso:: - :ref:`core_inspection_toplevel` + :ref:`orm_mapper_inspection_instancestate` """ From d39415a3c68f5278f04998b8b0d1fb309c0df461 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 21 Jun 2022 16:41:06 -0400 Subject: [PATCH 280/632] post-edits for ORM rework amends f9f1e8b6c5890eb17b6ba055ff563 Change-Id: I2fd6de56db82aad4d22abf5807e5849e48693124 (cherry picked from commit 8fe6a3fa550f18068e7566e25a9e96f60a186430) --- doc/build/orm/dataclasses.rst | 2 +- doc/build/orm/declarative_tables.rst | 2 +- doc/build/orm/mapping_styles.rst | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/doc/build/orm/dataclasses.rst b/doc/build/orm/dataclasses.rst index 4c1c91285e1..fa37e011e58 100644 --- a/doc/build/orm/dataclasses.rst +++ b/doc/build/orm/dataclasses.rst @@ -360,7 +360,7 @@ The SQLAlchemy ORM supports mapping an attrs_ class using **Declarative with Imperative Table** or **Imperative** mapping. The general form of these two styles is fully equivalent to the :ref:`orm_declarative_dataclasses_declarative_table` and -:ref:`orm_declarative_attrs_imperative_table` mapping forms used with +:ref:`orm_declarative_dataclasses_imperative_table` mapping forms used with dataclasses, where the inline attribute directives used by dataclasses or attrs are unchanged, and SQLAlchemy's table-oriented instrumentation is applied at runtime. diff --git a/doc/build/orm/declarative_tables.rst b/doc/build/orm/declarative_tables.rst index b8804a21d58..dad145c42f5 100644 --- a/doc/build/orm/declarative_tables.rst +++ b/doc/build/orm/declarative_tables.rst @@ -192,7 +192,7 @@ Appending additional columns to an existing Declarative mapped class ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ A declarative table configuration allows the addition of new -:class:`_schema.Column` objects an existing mapping after the :class:`.Table` +:class:`_schema.Column` objects to an existing mapping after the :class:`.Table` metadata has already been generated. 
For a declarative class that is declared using a declarative base class, diff --git a/doc/build/orm/mapping_styles.rst b/doc/build/orm/mapping_styles.rst index edacadf71d1..1b33aa2e29f 100644 --- a/doc/build/orm/mapping_styles.rst +++ b/doc/build/orm/mapping_styles.rst @@ -517,7 +517,6 @@ as well as specific history on modifications to attributes since the last flush: .. _dataclasses: https://docs.python.org/3/library/dataclasses.html -.. _attrs: https://pypi.org/project/attrs/ .. [1] When running under Python 2, a Python 2 "old style" class is the only kind of class that isn't compatible. When running code on Python 2, From 9ef3d97922c303514ab6f14ea00d56354d3b5548 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 22 Jun 2022 18:50:35 -0400 Subject: [PATCH 281/632] sub-categorize special function forms this is the tutorial, which should have some semblence of not getting too far into the weeds. however, as we dont really have other places to explain SQL concepts, and SQL functions have a lot of them, we dont have another home right now. so at least further sub-categorize window functions, table/column valued functions, and WITHIN GROUP into an "advanced function techniques" section with a disclaimer that these are less common use cases. Change-Id: I4b16bd5673c0bd39a9b880338f6ce3cfbafbe271 (cherry picked from commit c7519e94b5f7bfd9cee908f80c793d628bc37a1c) --- doc/build/tutorial/data_select.rst | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/doc/build/tutorial/data_select.rst b/doc/build/tutorial/data_select.rst index 78a0f174618..eab9dccefd0 100644 --- a/doc/build/tutorial/data_select.rst +++ b/doc/build/tutorial/data_select.rst @@ -1534,10 +1534,19 @@ Overall, the scenario where the or again special datatypes such as :class:`_types.JSON`, :class:`_types.ARRAY`. +Advanced SQL Function Techniques +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The following subsections illustrate more things that can be done with +SQL functions. 
While these techniques are less common and more advanced than +basic SQL function use, they nonetheless are extremely popular, largely +as a result of PostgreSQL's emphasis on more complex function forms, including +table- and column-valued forms that are popular with JSON data. + .. _tutorial_window_functions: Using Window Functions -~~~~~~~~~~~~~~~~~~~~~~ +###################### A window function is a special use of a SQL aggregate function which calculates the aggregate value over the rows being returned in a group as the individual @@ -1613,7 +1622,7 @@ Further options for window functions include usage of ranges; see .. _tutorial_functions_within_group: Special Modifiers WITHIN GROUP, FILTER -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +###################################### The "WITHIN GROUP" SQL syntax is used in conjunction with an "ordered set" or a "hypothetical set" aggregate @@ -1653,7 +1662,7 @@ using the :meth:`_functions.FunctionElement.filter` method:: .. _tutorial_functions_table_valued: Table-Valued Functions -~~~~~~~~~~~~~~~~~~~~~~~~~ +####################### Table-valued SQL functions support a scalar representation that contains named sub-elements. Often used for JSON and ARRAY-oriented functions as well as @@ -1708,7 +1717,7 @@ towards as ``value``, and then selected two of its three rows. .. 
_tutorial_functions_column_valued: Column Valued Functions - Table Valued Function as a Scalar Column -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +################################################################## A special syntax supported by PostgreSQL and Oracle is that of referring towards a function in the FROM clause, which then delivers itself as a From c4ee403e139d523ac554fcbb24a924a26f5261bd Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 23 Jun 2022 11:15:19 -0400 Subject: [PATCH 282/632] refine _include_fn to not include sibling mappers Fixed regression caused by :ticket:`8064` where a particular check for column correspondence was made too liberal, resulting in incorrect rendering for some ORM subqueries such as those using :meth:`.PropComparator.has` or :meth:`.PropComparator.any` in conjunction with joined-inheritance queries that also use legacy aliasing features. Fixes: #8162 Change-Id: Ib1fff33aa219aadf178348dd571bec1e691e606d (cherry picked from commit 4553f24980c0a23685afdb9ef6958b79f4b22e70) --- doc/build/changelog/unreleased_14/8162.rst | 9 ++++++ lib/sqlalchemy/orm/util.py | 3 +- test/orm/inheritance/_poly_fixtures.py | 3 +- test/orm/test_deprecations.py | 33 ++++++++++++++++++++++ 4 files changed, 46 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8162.rst diff --git a/doc/build/changelog/unreleased_14/8162.rst b/doc/build/changelog/unreleased_14/8162.rst new file mode 100644 index 00000000000..4b59155e21b --- /dev/null +++ b/doc/build/changelog/unreleased_14/8162.rst @@ -0,0 +1,9 @@ +.. 
change:: + :tags: bug, orm, regression + :tickets: 8162 + + Fixed regression caused by :ticket:`8064` where a particular check for + column correspondence was made too liberal, resulting in incorrect + rendering for some ORM subqueries such as those using + :meth:`.PropComparator.has` or :meth:`.PropComparator.any` in conjunction + with joined-inheritance queries that also use legacy aliasing features. diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index f95af41d24a..56aa9ff6c74 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -415,7 +415,8 @@ def __init__( def _include_fn(self, elem): entity = elem._annotations.get("parentmapper", None) - return not entity or entity.common_parent(self.mapper) + + return not entity or entity.isa(self.mapper) or self.mapper.isa(entity) class AliasedClass(object): diff --git a/test/orm/inheritance/_poly_fixtures.py b/test/orm/inheritance/_poly_fixtures.py index 7efc99913ad..7ba611f958c 100644 --- a/test/orm/inheritance/_poly_fixtures.py +++ b/test/orm/inheritance/_poly_fixtures.py @@ -350,9 +350,10 @@ def setup_mappers(cls): inherits=Person, polymorphic_identity="engineer", properties={ + "company": relationship(Company, viewonly=True), "machines": relationship( Machine, order_by=machines.c.machine_id - ) + ), }, ) diff --git a/test/orm/test_deprecations.py b/test/orm/test_deprecations.py index 33a91184c26..05bfdf26dab 100644 --- a/test/orm/test_deprecations.py +++ b/test/orm/test_deprecations.py @@ -6416,6 +6416,39 @@ class InheritedJoinTest( run_setup_mappers = "once" __dialect__ = "default" + def test_join_w_subq_adapt(self): + """test #8162""" + + Company, Manager, Engineer = self.classes( + "Company", "Manager", "Engineer" + ) + + sess = fixture_session() + + with _aliased_join_warning(): + self.assert_compile( + sess.query(Engineer) + .join(Company, Company.company_id == Engineer.company_id) + .outerjoin(Manager, Company.company_id == Manager.company_id) + 
.filter(~Engineer.company.has()), + "SELECT engineers.person_id AS engineers_person_id, " + "people.person_id AS people_person_id, " + "people.company_id AS people_company_id, " + "people.name AS people_name, people.type AS people_type, " + "engineers.status AS engineers_status, " + "engineers.engineer_name AS engineers_engineer_name, " + "engineers.primary_language AS engineers_primary_language " + "FROM people JOIN engineers " + "ON people.person_id = engineers.person_id " + "JOIN companies ON companies.company_id = people.company_id " + "LEFT OUTER JOIN (people AS people_1 JOIN managers AS " + "managers_1 ON people_1.person_id = managers_1.person_id) " + "ON companies.company_id = people_1.company_id " + "WHERE NOT (EXISTS (SELECT 1 FROM companies " + "WHERE companies.company_id = people.company_id))", + use_default_dialect=True, + ) + def test_load_only_alias_subclass(self): Manager = self.classes.Manager From 9876d59f4b63eefb1d64fa9f14c42cccc6aab1e9 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 23 Jun 2022 12:21:51 -0400 Subject: [PATCH 283/632] changelog fixes Change-Id: Ibeba5cf159ed2d80bdea22f300c09a8d18c56593 --- doc/build/changelog/unreleased_14/8100.rst | 2 +- doc/build/changelog/unreleased_14/8133.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/build/changelog/unreleased_14/8100.rst b/doc/build/changelog/unreleased_14/8100.rst index 7c5fc49aa82..250dfedd593 100644 --- a/doc/build/changelog/unreleased_14/8100.rst +++ b/doc/build/changelog/unreleased_14/8100.rst @@ -1,5 +1,5 @@ .. change:: - :tags: bug, reflection + :tags: bug, schema :tickets: 8100, 8101 Fixed bugs involving the :paramref:`.Table.include_columns` and the diff --git a/doc/build/changelog/unreleased_14/8133.rst b/doc/build/changelog/unreleased_14/8133.rst index 36da8ad8e6c..bf5b10df6d2 100644 --- a/doc/build/changelog/unreleased_14/8133.rst +++ b/doc/build/changelog/unreleased_14/8133.rst @@ -1,5 +1,5 @@ .. 
change:: - :tags: bug, ext + :tags: bug, extensions :tickets: 8133 Fixed bug in :class:`.Mutable` where pickling and unpickling of an ORM From 704b7d4c2e46c38fd29b6248597cf3e4ec3f6832 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 23 Jun 2022 12:22:46 -0400 Subject: [PATCH 284/632] - 1.4.38 --- doc/build/changelog/changelog_14.rst | 134 ++++++++++++++++++++- doc/build/changelog/unreleased_14/8084.rst | 10 -- doc/build/changelog/unreleased_14/8091.rst | 6 - doc/build/changelog/unreleased_14/8098.rst | 16 --- doc/build/changelog/unreleased_14/8100.rst | 30 ----- doc/build/changelog/unreleased_14/8109.rst | 12 -- doc/build/changelog/unreleased_14/8111.rst | 11 -- doc/build/changelog/unreleased_14/8113.rst | 12 -- doc/build/changelog/unreleased_14/8115.rst | 9 -- doc/build/changelog/unreleased_14/8133.rst | 7 -- doc/build/changelog/unreleased_14/8162.rst | 9 -- doc/build/conf.py | 4 +- 12 files changed, 135 insertions(+), 125 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/8084.rst delete mode 100644 doc/build/changelog/unreleased_14/8091.rst delete mode 100644 doc/build/changelog/unreleased_14/8098.rst delete mode 100644 doc/build/changelog/unreleased_14/8100.rst delete mode 100644 doc/build/changelog/unreleased_14/8109.rst delete mode 100644 doc/build/changelog/unreleased_14/8111.rst delete mode 100644 doc/build/changelog/unreleased_14/8113.rst delete mode 100644 doc/build/changelog/unreleased_14/8115.rst delete mode 100644 doc/build/changelog/unreleased_14/8133.rst delete mode 100644 doc/build/changelog/unreleased_14/8162.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 61e2fad816b..7e7cc179264 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,139 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.38 - :include_notes_from: unreleased_14 + :released: June 23, 2022 + + .. 
change:: + :tags: bug, orm, regression + :tickets: 8162 + + Fixed regression caused by :ticket:`8064` where a particular check for + column correspondence was made too liberal, resulting in incorrect + rendering for some ORM subqueries such as those using + :meth:`.PropComparator.has` or :meth:`.PropComparator.any` in conjunction + with joined-inheritance queries that also use legacy aliasing features. + + .. change:: + :tags: bug, engine + :tickets: 8115 + + Repaired a deprecation warning class decorator that was preventing key + objects such as :class:`_engine.Connection` from having a proper + ``__weakref__`` attribute, causing operations like Python standard library + ``inspect.getmembers()`` to fail. + + + .. change:: + :tags: bug, sql + :tickets: 8098 + + Fixed multiple observed race conditions related to :func:`.lambda_stmt`, + including an initial "dogpile" issue when a new Python code object is + initially analyzed among multiple simultaneous threads which created both a + performance issue as well as some internal corruption of state. + Additionally repaired observed race condition which could occur when + "cloning" an expression construct that is also in the process of being + compiled or otherwise accessed in a different thread due to memoized + attributes altering the ``__dict__`` while iterated, for Python versions + prior to 3.10; in particular the lambda SQL construct is sensitive to this + as it holds onto a single statement object persistently. The iteration has + been refined to use ``dict.copy()`` with or without an additional iteration + instead. + + .. change:: + :tags: bug, sql + :tickets: 8084 + + Enhanced the mechanism of :class:`.Cast` and other "wrapping" + column constructs to more fully preserve a wrapped :class:`.Label` + construct, including that the label name will be preserved in the + ``.c`` collection of a :class:`.Subquery`. 
The label was already + able to render in the SQL correctly on the outside of the construct + which it was wrapped inside. + + .. change:: + :tags: bug, orm, sql + :tickets: 8091 + + Fixed an issue where :meth:`_sql.GenerativeSelect.fetch` would not + be applied when executing a statement using the ORM. + + .. change:: + :tags: bug, orm + :tickets: 8109 + + Fixed issue where a :func:`_orm.with_loader_criteria` option could not be + pickled, as is necessary when it is carried along for propagation to lazy + loaders in conjunction with a caching scheme. Currently, the only form that + is supported as picklable is to pass the "where criteria" as a fixed + module-level callable function that produces a SQL expression. An ad-hoc + "lambda" can't be pickled, and a SQL expression object is usually not fully + picklable directly. + + + .. change:: + :tags: bug, schema + :tickets: 8100, 8101 + + Fixed bugs involving the :paramref:`.Table.include_columns` and the + :paramref:`.Table.resolve_fks` parameters on :class:`.Table`; these + little-used parameters were apparently not working for columns that refer + to foreign key constraints. + + In the first case, not-included columns that refer to foreign keys would + still attempt to create a :class:`.ForeignKey` object, producing errors + when attempting to resolve the columns for the foreign key constraint + within reflection; foreign key constraints that refer to skipped columns + are now omitted from the table reflection process in the same way as + occurs for :class:`.Index` and :class:`.UniqueConstraint` objects with the + same conditions. No warning is produced however, as we likely want to + remove the include_columns warnings for all constraints in 2.0. 
+ + In the latter case, the production of table aliases or subqueries would + fail on an FK related table not found despite the presence of + ``resolve_fks=False``; the logic has been repaired so that if a related + table is not found, the :class:`.ForeignKey` object is still proxied to the + aliased table or subquery (these :class:`.ForeignKey` objects are normally + used in the production of join conditions), but it is sent with a flag that + it's not resolvable. The aliased table / subquery will then work normally, + with the exception that it cannot be used to generate a join condition + automatically, as the foreign key information is missing. This was already + the behavior for such foreign key constraints produced using non-reflection + methods, such as joining :class:`.Table` objects from different + :class:`.MetaData` collections. + + .. change:: + :tags: bug, sql + :tickets: 8113 + + Adjusted the fix made for :ticket:`8056` which adjusted the escaping of + bound parameter names with special characters such that the escaped names + were translated after the SQL compilation step, which broke a published + recipe on the FAQ illustrating how to merge parameter names into the string + output of a compiled SQL string. The change restores the escaped names that + come from ``compiled.params`` and adds a conditional parameter to + :meth:`.SQLCompiler.construct_params` named ``escape_names`` that defaults + to ``True``, restoring the old behavior by default. + + .. change:: + :tags: bug, schema, mssql + :tickets: 8111 + + Fixed issue where :class:`.Table` objects that made use of IDENTITY columns + with a :class:`.Numeric` datatype would produce errors when attempting to + reconcile the "autoincrement" column, preventing construction of the + :class:`.Column` from using the :paramref:`.Column.autoincrement` parameter + as well as emitting errors when attempting to invoke an :class:`.Insert` + construct. + + + .. 
change:: + :tags: bug, extensions + :tickets: 8133 + + Fixed bug in :class:`.Mutable` where pickling and unpickling of an ORM + mapped instance would not correctly restore state for mappings that + contained multiple :class:`.Mutable`-enabled attributes. .. changelog:: :version: 1.4.37 diff --git a/doc/build/changelog/unreleased_14/8084.rst b/doc/build/changelog/unreleased_14/8084.rst deleted file mode 100644 index 43095e8c938..00000000000 --- a/doc/build/changelog/unreleased_14/8084.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 8084 - - Enhanced the mechanism of :class:`.Cast` and other "wrapping" - column constructs to more fully preserve a wrapped :class:`.Label` - construct, including that the label name will be preserved in the - ``.c`` collection of a :class:`.Subquery`. The label was already - able to render in the SQL correctly on the outside of the construct - which it was wrapped inside. diff --git a/doc/build/changelog/unreleased_14/8091.rst b/doc/build/changelog/unreleased_14/8091.rst deleted file mode 100644 index 014f66a56a1..00000000000 --- a/doc/build/changelog/unreleased_14/8091.rst +++ /dev/null @@ -1,6 +0,0 @@ -.. change:: - :tags: bug, orm, sql - :tickets: 8091 - - Fixed an issue where :meth:`_sql.GenerativeSelect.fetch` would not - be applied when executing a statement using the ORM. diff --git a/doc/build/changelog/unreleased_14/8098.rst b/doc/build/changelog/unreleased_14/8098.rst deleted file mode 100644 index 0267817abd6..00000000000 --- a/doc/build/changelog/unreleased_14/8098.rst +++ /dev/null @@ -1,16 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 8098 - - Fixed multiple observed race conditions related to :func:`.lambda_stmt`, - including an initial "dogpile" issue when a new Python code object is - initially analyzed among multiple simultaneous threads which created both a - performance issue as well as some internal corruption of state. 
- Additionally repaired observed race condition which could occur when - "cloning" an expression construct that is also in the process of being - compiled or otherwise accessed in a different thread due to memoized - attributes altering the ``__dict__`` while iterated, for Python versions - prior to 3.10; in particular the lambda SQL construct is sensitive to this - as it holds onto a single statement object persistently. The iteration has - been refined to use ``dict.copy()`` with or without an additional iteration - instead. diff --git a/doc/build/changelog/unreleased_14/8100.rst b/doc/build/changelog/unreleased_14/8100.rst deleted file mode 100644 index 250dfedd593..00000000000 --- a/doc/build/changelog/unreleased_14/8100.rst +++ /dev/null @@ -1,30 +0,0 @@ -.. change:: - :tags: bug, schema - :tickets: 8100, 8101 - - Fixed bugs involving the :paramref:`.Table.include_columns` and the - :paramref:`.Table.resolve_fks` parameters on :class:`.Table`; these - little-used parameters were apparently not working for columns that refer - to foreign key constraints. - - In the first case, not-included columns that refer to foreign keys would - still attempt to create a :class:`.ForeignKey` object, producing errors - when attempting to resolve the columns for the foreign key constraint - within reflection; foreign key constraints that refer to skipped columns - are now omitted from the table reflection process in the same way as - occurs for :class:`.Index` and :class:`.UniqueConstraint` objects with the - same conditions. No warning is produced however, as we likely want to - remove the include_columns warnings for all constraints in 2.0. 
- - In the latter case, the production of table aliases or subqueries would - fail on an FK related table not found despite the presence of - ``resolve_fks=False``; the logic has been repaired so that if a related - table is not found, the :class:`.ForeignKey` object is still proxied to the - aliased table or subquery (these :class:`.ForeignKey` objects are normally - used in the production of join conditions), but it is sent with a flag that - it's not resolvable. The aliased table / subquery will then work normally, - with the exception that it cannot be used to generate a join condition - automatically, as the foreign key information is missing. This was already - the behavior for such foreign key constraints produced using non-reflection - methods, such as joining :class:`.Table` objects from different - :class:`.MetaData` collections. diff --git a/doc/build/changelog/unreleased_14/8109.rst b/doc/build/changelog/unreleased_14/8109.rst deleted file mode 100644 index cf64d21ac4a..00000000000 --- a/doc/build/changelog/unreleased_14/8109.rst +++ /dev/null @@ -1,12 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 8109 - - Fixed issue where a :func:`_orm.with_loader_criteria` option could not be - pickled, as is necessary when it is carried along for propagation to lazy - loaders in conjunction with a caching scheme. Currently, the only form that - is supported as picklable is to pass the "where criteria" as a fixed - module-level callable function that produces a SQL expression. An ad-hoc - "lambda" can't be pickled, and a SQL expression object is usually not fully - picklable directly. - diff --git a/doc/build/changelog/unreleased_14/8111.rst b/doc/build/changelog/unreleased_14/8111.rst deleted file mode 100644 index ac43297027d..00000000000 --- a/doc/build/changelog/unreleased_14/8111.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. 
change:: - :tags: bug, schema, mssql - :tickets: 8111 - - Fixed issue where :class:`.Table` objects that made use of IDENTITY columns - with a :class:`.Numeric` datatype would produce errors when attempting to - reconcile the "autoincrement" column, preventing construction of the - :class:`.Column` from using the :paramref:`.Column.autoincrement` parameter - as well as emitting errors when attempting to invoke an :class:`.Insert` - construct. - diff --git a/doc/build/changelog/unreleased_14/8113.rst b/doc/build/changelog/unreleased_14/8113.rst deleted file mode 100644 index 100f9a731f0..00000000000 --- a/doc/build/changelog/unreleased_14/8113.rst +++ /dev/null @@ -1,12 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 8113 - - Adjusted the fix made for :ticket:`8056` which adjusted the escaping of - bound parameter names with special characters such that the escaped names - were translated after the SQL compilation step, which broke a published - recipe on the FAQ illustrating how to merge parameter names into the string - output of a compiled SQL string. The change restores the escaped names that - come from ``compiled.params`` and adds a conditional parameter to - :meth:`.SQLCompiler.construct_params` named ``escape_names`` that defaults - to ``True``, restoring the old behavior by default. diff --git a/doc/build/changelog/unreleased_14/8115.rst b/doc/build/changelog/unreleased_14/8115.rst deleted file mode 100644 index 856a76a42ec..00000000000 --- a/doc/build/changelog/unreleased_14/8115.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, engine - :tickets: 8115 - - Repaired a deprecation warning class decorator that was preventing key - objects such as :class:`_engine.Connection` from having a proper - ``__weakref__`` attribute, causing operations like Python standard library - ``inspect.getmembers()`` to fail. 
- diff --git a/doc/build/changelog/unreleased_14/8133.rst b/doc/build/changelog/unreleased_14/8133.rst deleted file mode 100644 index bf5b10df6d2..00000000000 --- a/doc/build/changelog/unreleased_14/8133.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: bug, extensions - :tickets: 8133 - - Fixed bug in :class:`.Mutable` where pickling and unpickling of an ORM - mapped instance would not correctly restore state for mappings that - contained multiple :class:`.Mutable`-enabled attributes. diff --git a/doc/build/changelog/unreleased_14/8162.rst b/doc/build/changelog/unreleased_14/8162.rst deleted file mode 100644 index 4b59155e21b..00000000000 --- a/doc/build/changelog/unreleased_14/8162.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, orm, regression - :tickets: 8162 - - Fixed regression caused by :ticket:`8064` where a particular check for - column correspondence was made too liberal, resulting in incorrect - rendering for some ORM subqueries such as those using - :meth:`.PropComparator.has` or :meth:`.PropComparator.any` in conjunction - with joined-inheritance queries that also use legacy aliasing features. diff --git a/doc/build/conf.py b/doc/build/conf.py index 7e32d4acea0..e2bf92658b0 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -213,9 +213,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. 
-release = "1.4.37" +release = "1.4.38" -release_date = "May 31, 2022" +release_date = "June 23, 2022" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From b32cac39907b94df4f77983e31e3f15a80d0f7e2 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 23 Jun 2022 12:26:49 -0400 Subject: [PATCH 285/632] Version 1.4.39 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 7e7cc179264..805326ccc84 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.39 + :include_notes_from: unreleased_14 + .. changelog:: :version: 1.4.38 :released: June 23, 2022 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index 91a4b0767d4..0026037943d 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.38" +__version__ = "1.4.39" def __go(lcls): From 9966bc72d41c968d7587a9e46b36c214a3ad6cf1 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 24 Jun 2022 10:31:46 -0400 Subject: [PATCH 286/632] add fallback for old mutable format Fixed regression caused by :ticket:`8133` where the pickle format for mutable attributes was changed, without a fallback to recognize the old format, causing in-place upgrades of SQLAlchemy to no longer be able to read pickled data from previous versions. A check plus a fallback for the old format is now in place. 
Fixes: #8133 Change-Id: I9029729b4bc56c8b3145797869229eeff48a3b3b (cherry picked from commit 271c38fd15b94d8acd0e6f054c8001b22535844e) --- doc/build/changelog/unreleased_14/mutable_fix.rst | 9 +++++++++ lib/sqlalchemy/ext/mutable.py | 10 ++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/mutable_fix.rst diff --git a/doc/build/changelog/unreleased_14/mutable_fix.rst b/doc/build/changelog/unreleased_14/mutable_fix.rst new file mode 100644 index 00000000000..2c96878b86e --- /dev/null +++ b/doc/build/changelog/unreleased_14/mutable_fix.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, orm, regression + :tickets: 8133 + + Fixed regression caused by :ticket:`8133` where the pickle format for + mutable attributes was changed, without a fallback to recognize the old + format, causing in-place upgrades of SQLAlchemy to no longer be able to + read pickled data from previous versions. A check plus a fallback for the + old format is now in place. 
diff --git a/lib/sqlalchemy/ext/mutable.py b/lib/sqlalchemy/ext/mutable.py index 934ac37a056..cbec06a31fe 100644 --- a/lib/sqlalchemy/ext/mutable.py +++ b/lib/sqlalchemy/ext/mutable.py @@ -502,8 +502,14 @@ def pickle(state, state_dict): def unpickle(state, state_dict): if "ext.mutable.values" in state_dict: - for val in state_dict["ext.mutable.values"][key]: - val._parents[state] = key + collection = state_dict["ext.mutable.values"] + if isinstance(collection, list): + # legacy format + for val in collection: + val._parents[state] = key + else: + for val in state_dict["ext.mutable.values"][key]: + val._parents[state] = key event.listen(parent_cls, "load", load, raw=True, propagate=True) event.listen( From f35a025bd1a9b649e5dedc86c8b5f3b12ffb8c9b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 24 Jun 2022 12:15:11 -0400 Subject: [PATCH 287/632] - 1.4.39 --- doc/build/changelog/changelog_14.rst | 12 +++++++++++- doc/build/changelog/unreleased_14/mutable_fix.rst | 9 --------- doc/build/conf.py | 4 ++-- 3 files changed, 13 insertions(+), 12 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/mutable_fix.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 805326ccc84..2e5ddea7c71 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,17 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.39 - :include_notes_from: unreleased_14 + :released: June 24, 2022 + + .. change:: + :tags: bug, orm, regression + :tickets: 8133 + + Fixed regression caused by :ticket:`8133` where the pickle format for + mutable attributes was changed, without a fallback to recognize the old + format, causing in-place upgrades of SQLAlchemy to no longer be able to + read pickled data from previous versions. A check plus a fallback for the + old format is now in place. .. 
changelog:: :version: 1.4.38 diff --git a/doc/build/changelog/unreleased_14/mutable_fix.rst b/doc/build/changelog/unreleased_14/mutable_fix.rst deleted file mode 100644 index 2c96878b86e..00000000000 --- a/doc/build/changelog/unreleased_14/mutable_fix.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, orm, regression - :tickets: 8133 - - Fixed regression caused by :ticket:`8133` where the pickle format for - mutable attributes was changed, without a fallback to recognize the old - format, causing in-place upgrades of SQLAlchemy to no longer be able to - read pickled data from previous versions. A check plus a fallback for the - old format is now in place. diff --git a/doc/build/conf.py b/doc/build/conf.py index e2bf92658b0..10dc263ac22 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -213,9 +213,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. -release = "1.4.38" +release = "1.4.39" -release_date = "June 23, 2022" +release_date = "June 24, 2022" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From 36e95a60fb47bdd45cfa0e893085c50160e9d06f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 24 Jun 2022 12:20:36 -0400 Subject: [PATCH 288/632] Version 1.4.40 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 2e5ddea7c71..cf221b725d8 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.40 + :include_notes_from: unreleased_14 + .. 
changelog:: :version: 1.4.39 :released: June 24, 2022 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index 0026037943d..3cae9f5544c 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.39" +__version__ = "1.4.40" def __go(lcls): From 2494f2d0ec54d82a8c7e43634d2a1abfe0c53419 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 28 Jun 2022 10:18:58 -0400 Subject: [PATCH 289/632] block mariadb 1.1.2 regression causing unicode schema queries to fail Change-Id: Ie66ca038357ec90804a357979d4fd22611c47340 References: https://jira.mariadb.org/browse/CONPY-209 (cherry picked from commit f19e50ab75cfc904acef31434c92542f3ab50d61) --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index e49a1c9c20e..8ec3ffa7948 100644 --- a/setup.cfg +++ b/setup.cfg @@ -58,7 +58,7 @@ mysql = mysql_connector = mysql-connector-python mariadb_connector = - mariadb>=1.0.1;python_version>="3" + mariadb>=1.0.1,!=1.1.2;python_version>="3" oracle = cx_oracle>=7,<8;python_version<"3" cx_oracle>=7;python_version>="3" From de4adecae6e77c0677a70e807bae5ac8cc4c6314 Mon Sep 17 00:00:00 2001 From: Daniel Hall Date: Tue, 28 Jun 2022 15:03:34 -0400 Subject: [PATCH 290/632] 8152: add documentation for postgresql dialect time and timestamp types add documentation for postgresql dialect time and timestamp types This pull request is: - [ X] A documentation / typographical error fix - Good to go, no issue or tests are needed **Have a nice day!** Closes: #8185 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/8185 Pull-request-sha: 2b76fe080babd72f8e5615b34cb544abbc446a28 Change-Id: Ib71b35d106d0d0686e5551f07b88486b6c59624d (cherry picked from commit 300ce85463474c0264b37c0f2a8baa5f3cf3d822) --- doc/build/dialects/postgresql.rst | 6 +++++ lib/sqlalchemy/dialects/postgresql/base.py | 26 ++++++++++++++++++++++ 2 files changed, 32 insertions(+) diff 
--git a/doc/build/dialects/postgresql.rst b/doc/build/dialects/postgresql.rst index d30c03885d5..c58aaee9b48 100644 --- a/doc/build/dialects/postgresql.rst +++ b/doc/build/dialects/postgresql.rst @@ -89,6 +89,12 @@ construction arguments, are as follows: .. autoclass:: REGCLASS +.. autoclass:: TIMESTAMP + :members: __init__ + +.. autoclass:: TIME + :members: __init__ + .. autoclass:: TSVECTOR .. autoclass:: UUID diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index db88d9e6a89..dbaced5db53 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -1682,13 +1682,39 @@ class REGCLASS(sqltypes.TypeEngine): class TIMESTAMP(sqltypes.TIMESTAMP): + + """Provide the PostgreSQL TIMESTAMP type.""" + + __visit_name__ = "TIMESTAMP" + def __init__(self, timezone=False, precision=None): + """Construct a TIMESTAMP. + + :param timezone: boolean value if timezone present, default False + :param precision: optional integer precision value + + .. versionadded:: 1.4 + + """ super(TIMESTAMP, self).__init__(timezone=timezone) self.precision = precision class TIME(sqltypes.TIME): + + """PostgreSQL TIME type.""" + + __visit_name__ = "TIME" + def __init__(self, timezone=False, precision=None): + """Construct a TIME. + + :param timezone: boolean value if timezone present, default False + :param precision: optional integer precision value + + .. 
versionadded:: 1.4 + + """ super(TIME, self).__init__(timezone=timezone) self.precision = precision From 3ea6c365a2dd7060f0c6d462e2e7752c832f59af Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 28 Jun 2022 18:55:19 -0400 Subject: [PATCH 291/632] produce column copies up the whole hierarchy first Fixed issue where a hierarchy of classes set up as an abstract or mixin declarative classes could not declare standalone columns on a superclass that would then be copied correctly to a :class:`_orm.declared_attr` callable that wanted to make use of them on a descendant class. Originally it looked like this would produce an ordering change, however an adjustment to the flow for produce_column_copies has avoided that for now. Fixes: #8190 Change-Id: I4e2ee74edb110793eb42691c3e4a0e0535fba7e9 (cherry picked from commit 9d12d493eb38f958c2d50da28f83ccc6de01f0dc) --- doc/build/changelog/unreleased_14/8190.rst | 8 ++++ lib/sqlalchemy/orm/decl_base.py | 40 ++++++++++++++++-- test/orm/declarative/test_mixin.py | 47 ++++++++++++++++++++++ 3 files changed, 92 insertions(+), 3 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8190.rst diff --git a/doc/build/changelog/unreleased_14/8190.rst b/doc/build/changelog/unreleased_14/8190.rst new file mode 100644 index 00000000000..934e44cf519 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8190.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, orm, declarative + :tickets: 8190 + + Fixed issue where a hierarchy of classes set up as an abstract or mixin + declarative classes could not declare standalone columns on a superclass + that would then be copied correctly to a :class:`_orm.declared_attr` + callable that wanted to make use of them on a descendant class. 
diff --git a/lib/sqlalchemy/orm/decl_base.py b/lib/sqlalchemy/orm/decl_base.py index ed4ccd19682..6e1c79745fa 100644 --- a/lib/sqlalchemy/orm/decl_base.py +++ b/lib/sqlalchemy/orm/decl_base.py @@ -459,7 +459,14 @@ def _scan_attributes(self): attribute_is_overridden = self._cls_attr_override_checker(self.cls) + bases = [] + for base in cls.__mro__: + # collect bases and make sure standalone columns are copied + # to be the column they will ultimately be on the class, + # so that declared_attr functions use the right columns. + # need to do this all the way up the hierarchy first + # (see #8190) class_mapped = ( base is not cls @@ -472,9 +479,34 @@ def _scan_attributes(self): local_attributes_for_class = self._cls_attr_resolver(base) if not class_mapped and base is not cls: - self._produce_column_copies( - local_attributes_for_class, attribute_is_overridden + locally_collected_columns = self._produce_column_copies( + local_attributes_for_class, + attribute_is_overridden, + ) + else: + locally_collected_columns = {} + + bases.append( + ( + base, + class_mapped, + local_attributes_for_class, + locally_collected_columns, ) + ) + + for ( + base, + class_mapped, + local_attributes_for_class, + locally_collected_columns, + ) in bases: + + # this transfer can also take place as we scan each name + # for finer-grained control of how collected_attributes is + # populated, as this is what impacts column ordering. + # however it's simpler to get it out of the way here. 
+ dict_.update(locally_collected_columns) for name, obj, is_dataclass in local_attributes_for_class(): if name == "__mapper_args__": @@ -640,6 +672,7 @@ def _produce_column_copies( ): cls = self.cls dict_ = self.dict_ + locally_collected_attributes = {} column_copies = self.column_copies # copy mixin columns to the mapped class @@ -664,7 +697,8 @@ def _produce_column_copies( column_copies[obj] = copy_ = obj._copy() copy_._creation_order = obj._creation_order setattr(cls, name, copy_) - dict_[name] = copy_ + locally_collected_attributes[name] = copy_ + return locally_collected_attributes def _extract_mappable_attributes(self): cls = self.cls diff --git a/test/orm/declarative/test_mixin.py b/test/orm/declarative/test_mixin.py index 78ab4dbfc3e..f8e0cf5adb3 100644 --- a/test/orm/declarative/test_mixin.py +++ b/test/orm/declarative/test_mixin.py @@ -2140,6 +2140,53 @@ class User(Base, HasAddressCount): "> :param_1", ) + def test_multilevel_mixin_attr_refers_to_column_copies(self): + """test #8190. + + This test is the same idea as test_mixin_attr_refers_to_column_copies + but tests the column copies from superclasses. 
+ + """ + counter = mock.Mock() + + class SomeOtherMixin: + status = Column(String) + + class HasAddressCount(SomeOtherMixin): + id = Column(Integer, primary_key=True) + + @declared_attr + def address_count(cls): + counter(cls.id) + counter(cls.status) + return column_property( + select(func.count(Address.id)) + .where(Address.user_id == cls.id) + .where(cls.status == "some status") + .scalar_subquery() + ) + + class Address(Base): + __tablename__ = "address" + id = Column(Integer, primary_key=True) + user_id = Column(ForeignKey("user.id")) + + class User(Base, HasAddressCount): + __tablename__ = "user" + + eq_(counter.mock_calls, [mock.call(User.id), mock.call(User.status)]) + + sess = fixture_session() + self.assert_compile( + sess.query(User).having(User.address_count > 5), + "SELECT (SELECT count(address.id) AS count_1 FROM address " + 'WHERE address.user_id = "user".id AND "user".status = :param_1) ' + 'AS anon_1, "user".status AS user_status, "user".id AS user_id ' + 'FROM "user" HAVING (SELECT count(address.id) AS count_1 ' + 'FROM address WHERE address.user_id = "user".id ' + 'AND "user".status = :param_1) > :param_2', + ) + class AbstractTest(DeclarativeTestBase): def test_abstract_boolean(self): From 37b35cf645151772a0480c337e9c228a8d8ed05b Mon Sep 17 00:00:00 2001 From: Jefferson Oliveira Date: Wed, 29 Jun 2022 11:53:36 -0300 Subject: [PATCH 292/632] fix documentation typo (cherry picked from commit 52317cec36e1807d06a9715e2bcc4067b7eb173f) --- doc/build/tutorial/orm_data_manipulation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/tutorial/orm_data_manipulation.rst b/doc/build/tutorial/orm_data_manipulation.rst index 1ee5e95fa95..ca955d02377 100644 --- a/doc/build/tutorial/orm_data_manipulation.rst +++ b/doc/build/tutorial/orm_data_manipulation.rst @@ -314,7 +314,7 @@ dirty:: However note we are **still in a transaction** and our changes have not been pushed to the database's permanent storage. 
Since Sandy's last name is in fact "Cheeks" not "Squirrel", we will repair this mistake later when -we roll back the transction. But first we'll make some more data changes. +we roll back the transaction. But first we'll make some more data changes. .. seealso:: From d04f013ee3fc7275f35bbc775427c02b719d1ed4 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 30 Jun 2022 13:33:30 -0400 Subject: [PATCH 293/632] add more cross-linking / notes for yield_per, partitions Change-Id: I0f8db2532827c76a2751186638d22104230db843 references: #8198 (cherry picked from commit 59bafe0fbefe16269c72ac39e699e4127d49841f) --- doc/build/orm/queryguide.rst | 19 ++++++++++++++++++- lib/sqlalchemy/engine/result.py | 20 ++++++++++++++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/doc/build/orm/queryguide.rst b/doc/build/orm/queryguide.rst index 012206ba7c5..f3d4198398a 100644 --- a/doc/build/orm/queryguide.rst +++ b/doc/build/orm/queryguide.rst @@ -1017,7 +1017,24 @@ often useful to use with a result partitioning method such as (User(id=1, name='spongebob', fullname='Spongebob Squarepants'),) ... -The purpose of this method is when fetching very large result sets +For expediency, the :meth:`_engine.Result.yield_per` method may also be used +with an ORM-enabled result set, which will have the same effect at result +fetching time as if the ``yield_per`` execution option were used. The +:meth:`_engine.Result.partitions` method, if used, automatically uses the +number sent to :meth:`_engine.Result.yield_per` as the number of rows in each +partition:: + + >>> stmt = select(User) + {sql}>>> for partition in session.execute(stmt).yield_per(10).partitions(): + ... for row in partition: + ... print(row) + SELECT user_account.id, user_account.name, user_account.fullname + FROM user_account + [...] (){stop} + (User(id=1, name='spongebob', fullname='Spongebob Squarepants'),) + ... 
+ +The purpose of "yield per" is when fetching very large result sets (> 10K rows), to batch results in sub-collections and yield them out partially, so that the Python interpreter doesn't need to declare very large areas of memory which is both time consuming and leads diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py index cb6906f0368..6ca8f8c9d9d 100644 --- a/lib/sqlalchemy/engine/result.py +++ b/lib/sqlalchemy/engine/result.py @@ -804,6 +804,12 @@ def yield_per(self, num): :param num: number of rows to fetch each time the buffer is refilled. If set to a value below 1, fetches all rows for the next buffer. + .. seealso:: + + :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` + + :meth:`_engine.Result.partitions` + """ self._yield_per = num @@ -997,6 +1003,13 @@ def partitions(self, size=None): results, if possible. Not all drivers support this option and the option is silently ignored for those who do not. + When using the ORM, the :meth:`_engine.Result.partitions` method + is typically more effective from a memory perspective when it is + combined with use of the :meth:`_engine.Result.yield_per` method, + which instructs the ORM loading internals to only build a certain + amount of ORM objects from a result at a time before yielding + them out. + .. versionadded:: 1.4 :param size: indicate the maximum number of rows to be present @@ -1007,6 +1020,13 @@ def partitions(self, size=None): :return: iterator of lists + .. seealso:: + + :paramref:`.Connection.execution_options.stream_results` + + :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` + + """ getter = self._manyrow_getter From 3e98a236008c0d4b7b83e70cfd1365c98dd42528 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 30 Jun 2022 15:00:08 -0400 Subject: [PATCH 294/632] more partition doc adjustments the partition story is not very good, this is a ton of different options and they have to all be used simultaenously for the common case. 
Change-Id: I62963b7db1230a2670dda0ce812086f9265a3cb7 (cherry picked from commit aae1696a64509e54efd7d59a137c5ea6743363a7) --- doc/build/core/connections.rst | 21 ++++++++++++++------- doc/build/orm/queryguide.rst | 15 +++++++++------ 2 files changed, 23 insertions(+), 13 deletions(-) diff --git a/doc/build/core/connections.rst b/doc/build/core/connections.rst index c683c7ee9df..b9ba6d9b392 100644 --- a/doc/build/core/connections.rst +++ b/doc/build/core/connections.rst @@ -707,22 +707,23 @@ buffers of the given size, only fetching new rows when the buffer is empty:: for row in result.yield_per(100): _process_row(row) -The ``stream_results`` option is also available with the ORM. When using the -ORM, either the :meth:`_engine.Result.yield_per` or :meth:`_engine.Result.partitions` -methods should be used to set the number of ORM rows to be buffered each time -while yielding:: +The ``stream_results`` option is also available with the ORM. When using the +ORM, the :meth:`_engine.Result.yield_per` method should be used to set the +number of ORM rows to be buffered each time while yielding +(:meth:`_engine.Result.partitions` uses the "yield per" value by default for +partition size):: with orm.Session(engine) as session: result = session.execute( select(User).order_by(User_id).execution_options(stream_results=True), ) - for partition in result.partitions(100): + for partition in result.yield_per(100).partitions(): _process_rows(partition) .. note:: ORM result sets currently must make use of :meth:`_engine.Result.yield_per` - or :meth:`_engine.Result.partitions` in order to achieve streaming ORM results. - If either of these methods are not used to set the number of rows to + in order to achieve streaming ORM results. + If the method is not used to set the number of rows to fetch before yielding, the entire result is fetched before rows are yielded. 
This may change in a future release so that the automatic buffer size used by :class:`_engine.Connection` takes place for ORM results as well. @@ -734,6 +735,12 @@ execution option:: for row in session.query(User).yield_per(100): # process row +.. seealso:: + + :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` + + :meth:`_engine.Result.partitions` + .. _dbengine_implicit: diff --git a/doc/build/orm/queryguide.rst b/doc/build/orm/queryguide.rst index f3d4198398a..261be27812e 100644 --- a/doc/build/orm/queryguide.rst +++ b/doc/build/orm/queryguide.rst @@ -1018,14 +1018,17 @@ often useful to use with a result partitioning method such as ... For expediency, the :meth:`_engine.Result.yield_per` method may also be used -with an ORM-enabled result set, which will have the same effect at result -fetching time as if the ``yield_per`` execution option were used. The -:meth:`_engine.Result.partitions` method, if used, automatically uses the -number sent to :meth:`_engine.Result.yield_per` as the number of rows in each -partition:: +with an ORM-enabled result set, which will have the similar effect at result +fetching time as if the ``yield_per`` execution option were used, with the +exception that ``stream_results`` option, described below, is not set +automatically. The :meth:`_engine.Result.partitions` method, if used, +automatically uses the number sent to :meth:`_engine.Result.yield_per` as the +number of rows in each partition:: >>> stmt = select(User) - {sql}>>> for partition in session.execute(stmt).yield_per(10).partitions(): + {sql} >>> for partition in session.execute( + ... stmt, execution_options={"stream_results": True} + ... ).yield_per(10).partitions(): ... for row in partition: ... 
print(row) SELECT user_account.id, user_account.name, user_account.fullname From 06685f392d2e36981b4073b902539ad966c57327 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 30 Jun 2022 19:10:06 -0400 Subject: [PATCH 295/632] repair yield_per for non-SS dialects and add new options Implemented new :paramref:`_engine.Connection.execution_options.yield_per` execution option for :class:`_engine.Connection` in Core, to mirror that of the same :ref:`yield_per ` option available in the ORM. The option sets both the :paramref:`_engine.Connection.execution_options.stream_results` option at the same time as invoking :meth:`_engine.Result.yield_per`, to provide the most common streaming result configuration which also mirrors that of the ORM use case in its usage pattern. Fixed bug in :class:`_engine.Result` where the usage of a buffered result strategy would not be used if the dialect in use did not support an explicit "server side cursor" setting, when using :paramref:`_engine.Connection.execution_options.stream_results`. This is in error as DBAPIs such as that of SQLite and Oracle already use a non-buffered result fetching scheme, which still benefits from usage of partial result fetching. The "buffered" strategy is now used in all cases where :paramref:`_engine.Connection.execution_options.stream_results` is set. Added :meth:`.FilterResult.yield_per` so that result implementations such as :class:`.MappingResult`, :class:`.ScalarResult` and :class:`.AsyncResult` have access to this method. 
Fixes: #8199 Change-Id: I6dde3cbe483a1bf81e945561b60f4b7d1c434750 (cherry picked from commit e5a0cdb2eaa1d7f381e93d0529a7f8e6d5888877) --- doc/build/changelog/unreleased_14/yp.rst | 38 +++++ doc/build/core/connections.rst | 201 +++++++++++++++-------- doc/build/orm/queryguide.rst | 93 ++++++----- lib/sqlalchemy/engine/__init__.py | 1 + lib/sqlalchemy/engine/base.py | 89 +++++++++- lib/sqlalchemy/engine/cursor.py | 1 - lib/sqlalchemy/engine/default.py | 10 +- lib/sqlalchemy/engine/result.py | 77 ++++++--- lib/sqlalchemy/ext/asyncio/result.py | 3 +- lib/sqlalchemy/orm/context.py | 9 +- lib/sqlalchemy/orm/query.py | 4 + lib/sqlalchemy/testing/fixtures.py | 22 +++ test/ext/asyncio/test_engine_py3k.py | 51 ++++-- test/orm/test_query.py | 12 +- test/sql/test_resultset.py | 131 +++++++++++++++ 15 files changed, 588 insertions(+), 154 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/yp.rst diff --git a/doc/build/changelog/unreleased_14/yp.rst b/doc/build/changelog/unreleased_14/yp.rst new file mode 100644 index 00000000000..74e2c6a468f --- /dev/null +++ b/doc/build/changelog/unreleased_14/yp.rst @@ -0,0 +1,38 @@ +.. change:: + :tags: usecase, engine + + Implemented new :paramref:`_engine.Connection.execution_options.yield_per` + execution option for :class:`_engine.Connection` in Core, to mirror that of + the same :ref:`yield_per ` option available in + the ORM. The option sets both the + :paramref:`_engine.Connection.execution_options.stream_results` option at + the same time as invoking :meth:`_engine.Result.yield_per`, to provide the + most common streaming result configuration which also mirrors that of the + ORM use case in its usage pattern. + + .. seealso:: + + :ref:`engine_stream_results` - revised documentation + + +.. 
change:: + :tags: bug, engine + + Fixed bug in :class:`_engine.Result` where the usage of a buffered result + strategy would not be used if the dialect in use did not support an + explicit "server side cursor" setting, when using + :paramref:`_engine.Connection.execution_options.stream_results`. This is in + error as DBAPIs such as that of SQLite and Oracle already use a + non-buffered result fetching scheme, which still benefits from usage of + partial result fetching. The "buffered" strategy is now used in all + cases where :paramref:`_engine.Connection.execution_options.stream_results` + is set. + + +.. change:: + :tags: bug, engine + :tickets: 8199 + + Added :meth:`.FilterResult.yield_per` so that result implementations + such as :class:`.MappingResult`, :class:`.ScalarResult` and + :class:`.AsyncResult` have access to this method. diff --git a/doc/build/core/connections.rst b/doc/build/core/connections.rst index b9ba6d9b392..5228235e73f 100644 --- a/doc/build/core/connections.rst +++ b/doc/build/core/connections.rst @@ -633,20 +633,33 @@ To sum up: Using Server Side Cursors (a.k.a. stream results) ================================================== -A limited number of dialects have explicit support for the concept of "server -side cursors" vs. "buffered cursors". While a server side cursor implies a -variety of different capabilities, within SQLAlchemy's engine and dialect -implementation, it refers only to whether or not a particular set of results is -fully buffered in memory before they are fetched from the cursor, using a -method such as ``cursor.fetchall()``. SQLAlchemy has no direct support -for cursor behaviors such as scrolling; to make use of these features for -a particular DBAPI, use the cursor directly as documented at -:ref:`dbapi_connections`. - -Some DBAPIs, such as the cx_Oracle DBAPI, exclusively use server side cursors -internally. 
All result sets are essentially unbuffered across the total span -of a result set, utilizing only a smaller buffer that is of a fixed size such -as 100 rows at a time. +Some backends feature explicit support for the concept of "server +side cursors" versus "client side cursors". A client side cursor here +means that the database driver fully fetches all rows from a result set +into memory before returning from a statement execution. Drivers such as +those of PostgreSQL and MySQL/MariaDB generally use client side cursors +by default. A server side cursor, by contrast, indicates that result rows +remain pending within the database server's state as result rows are consumed +by the client. The drivers for Oracle generally use a "server side" model, +for example, and the SQLite dialect, while not using a real "client / server" +architecture, still uses an unbuffered result fetching approach that will +leave result rows outside of process memory before they are consumed. + +.. topic:: What we really mean is "buffered" vs. "unbuffered" results + + Server side cursors also imply a wider set of features with relational + databases, such as the ability to "scroll" a cursor forwards and backwards. + SQLAlchemy does not include any explicit support for these behaviors; within + SQLAlchemy itself, the general term "server side cursors" should be considered + to mean "unbuffered results" and "client side cursors" means "result rows + are buffered into memory before the first row is returned". To work with + a richer "server side cursor" featureset specific to a certain DBAPI driver, + see the section :ref:`dbapi_connections_cursor`. + +From this basic architecture it follows that a "server side cursor" is more +memory efficient when fetching very large result sets, while at the same time +may introduce more complexity in the client/server communication process +and be less efficient for small result sets (typically less than 10000 rows). 
For those dialects that have conditional support for buffered or unbuffered results, there are usually caveats to the use of the "unbuffered", or server @@ -665,75 +678,119 @@ unbuffered cursors are not generally useful except in the uncommon case of an application fetching a very large number of rows in chunks, where the processing of these rows can be complete before more rows are fetched. -To make use of a server side cursor for a particular execution, the -:paramref:`_engine.Connection.execution_options.stream_results` option -is used, which may be called on the :class:`_engine.Connection` object, -on the statement object, or in the ORM-level contexts mentioned below. - -When using this option for a statement, it's usually appropriate to use -a method like :meth:`_engine.Result.partitions` to work on small sections -of the result set at a time, while also fetching enough rows for each -pull so that the operation is efficient:: +For database drivers that provide client and server side cursor options, +the :paramref:`_engine.Connection.execution_options.stream_results` +and :paramref:`_engine.Connection.execution_options.yield_per` execution +options provide access to "server side cursors" on a per-:class:`_engine.Connection` +or per-statement basis. Similar options exist when using an ORM +:class:`_orm.Session` as well. 
- with engine.connect() as conn: - result = conn.execution_options(stream_results=True).execute(text("select * from table")) +Streaming with a fixed buffer via yield_per +-------------------------------------------- - for partition in result.partitions(100): - _process_rows(partition) +As individual row-fetch operations with fully unbuffered server side cursors +are typically more expensive than fetching batches of rows at once, The +:paramref:`_engine.Connection.execution_options.yield_per` execution option +configures a :class:`_engine.Connection` or statement to make use of +server-side cursors as are available, while at the same time configuring a +fixed-size buffer of rows that will retrieve rows from the server in batches as +they are consumed. This parameter may be to a positive integer value using the +:meth:`_engine.Connection.execution_options` method on +:class:`_engine.Connection` or on a statement using the +:meth:`.Executable.execution_options` method. + +.. versionadded:: 1.4.40 :paramref:`_engine.Connection.execution_options.yield_per` as a + Core-only option is new as of SQLAlchemy 1.4.40; for prior 1.4 versions, + use :paramref:`_engine.Connection.execution_options.stream_results` + directly in combination with :meth:`_engine.Result.yield_per`. + +Using this option is equivalent to manually setting the +:paramref:`_engine.Connection.execution_options.stream_results` option, +described in the next section, and then invoking the +:meth:`_engine.Result.yield_per` method on the :class:`_engine.Result` +object with the given integer value. 
In both cases, the effect this +combination has includes: + +* server side cursors mode is selected for the given backend, if available + and not already the default behavior for that backend +* as result rows are fetched, they will be buffered in batches, where the + size of each batch up until the last batch will be equal to the integer + argument passed to the + :paramref:`_engine.Connection.execution_options.yield_per` option or the + :meth:`_engine.Result.yield_per` method; the last batch is then sized against + the remaining rows fewer than this size +* The default partition size used by the :meth:`_engine.Result.partitions` + method, if used, will be made equal to this integer size as well. + +These three behaviors are illustrated in the example below:: + with engine.connect() as conn: + result = ( + conn. + execution_options(yield_per=100). + execute(text("select * from table")) + ) -If the :class:`_engine.Result` is iterated directly, rows are fetched internally + for partition in result.partitions(): + # partition is an iterable that will be at most 100 items + for row in partition: + print(f"{row}") + +The above example illustrates the combination of ``yield_per=100`` along +with using the :meth:`_engine.Result.partitions` method to run processing +on rows in batches that match the size fetched from the server. The +use of :meth:`_engine.Result.partitions` is optional, and if the +:class:`_engine.Result` is iterated directly, a new batch of rows will be +buffered for each 100 rows fetched. Calling a method such as +:meth:`_engine.Result.all` should **not** be used, as this will fully +fetch all remaining rows at once and defeat the purpose of using ``yield_per``. + +The :paramref:`_engine.Connection.execution_options.yield_per` option +is portable to the ORM as well, used by a :class:`_orm.Session` to fetch +ORM objects, where it also limits the amount of ORM objects generated at once. 
+See the section :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` +for further background on using +:paramref:`_engine.Connection.execution_options.yield_per` with the ORM. + +.. versionadded:: 1.4.40 Added + :paramref:`_engine.Connection.execution_options.yield_per` + as a Core level execution option to conveniently set streaming results, + buffer size, and partition size all at once in a manner that is transferrable + to that of the ORM's similar use case. + +.. _engine_stream_results_sr: + +Streaming with a dynamically growing buffer using stream_results +----------------------------------------------------------------- + +To enable server side cursors without a specific partition size, the +:paramref:`_engine.Connection.execution_options.stream_results` option may be +used, which like :paramref:`_engine.Connection.execution_options.yield_per` may +be called on the :class:`_engine.Connection` object or the statement object. + +When a :class:`_engine.Result` object delivered using the +:paramref:`_engine.Connection.execution_options.stream_results` option +is iterated directly, rows are fetched internally using a default buffering scheme that buffers first a small set of rows, then a larger and larger buffer on each fetch up to a pre-configured limit -of 1000 rows. This can be affected using the ``max_row_buffer`` execution -option:: +of 1000 rows. The maximum size of this buffer can be affected using the +:paramref:`_engine.Connection.execution_options.max_row_buffer` execution option:: with engine.connect() as conn: conn = conn.execution_options(stream_results=True, max_row_buffer=100) result = conn.execute(text("select * from table")) for row in result: - _process_row(row) - -The size of the buffer may also be set to a fixed size using the -:meth:`_engine.Result.yield_per` method. 
Calling this method with a number -of rows will cause all result-fetching methods to work from -buffers of the given size, only fetching new rows when the buffer is empty:: - - with engine.connect() as conn: - result = conn.execution_options(stream_results=True).execute(text("select * from table")) + print(f"{row}") - for row in result.yield_per(100): - _process_row(row) - -The ``stream_results`` option is also available with the ORM. When using the -ORM, the :meth:`_engine.Result.yield_per` method should be used to set the -number of ORM rows to be buffered each time while yielding -(:meth:`_engine.Result.partitions` uses the "yield per" value by default for -partition size):: - - with orm.Session(engine) as session: - result = session.execute( - select(User).order_by(User_id).execution_options(stream_results=True), - ) - for partition in result.yield_per(100).partitions(): - _process_rows(partition) - - -.. note:: ORM result sets currently must make use of :meth:`_engine.Result.yield_per` - in order to achieve streaming ORM results. - If the method is not used to set the number of rows to - fetch before yielding, the entire result is fetched before rows are yielded. - This may change in a future release so that the automatic buffer size used - by :class:`_engine.Connection` takes place for ORM results as well. - -When using a :term:`1.x style` ORM query with :class:`_orm.Query`, yield_per is -available via :meth:`_orm.Query.yield_per` - this also sets the ``stream_results`` -execution option:: - - for row in session.query(User).yield_per(100): - # process row +While the :paramref:`_engine.Connection.execution_options.stream_results` +option may be combined with use of the :meth:`_engine.Result.partitions` +method, a specific partition size should be passed to +:meth:`_engine.Result.partitions` so that the entire result is not fetched. 
+It is usually more straightforward to use the +:paramref:`_engine.Connection.execution_options.yield_per` option when setting +up to use the :meth:`_engine.Result.partitions` method. .. seealso:: @@ -741,6 +798,7 @@ execution option:: :meth:`_engine.Result.partitions` + :meth:`_engine.Result.yield_per` .. _dbengine_implicit: @@ -1973,6 +2031,8 @@ method may be used:: .. versionadded:: 1.4 Added the :meth:`_engine.Connection.exec_driver_sql` method. +.. _dbapi_connections_cursor: + Working with the DBAPI cursor directly -------------------------------------- @@ -2178,6 +2238,9 @@ Result Set API .. autoclass:: ChunkedIteratorResult :members: +.. autoclass:: FilterResult + :members: + .. autoclass:: FrozenResult :members: diff --git a/doc/build/orm/queryguide.rst b/doc/build/orm/queryguide.rst index 261be27812e..06d3dace903 100644 --- a/doc/build/orm/queryguide.rst +++ b/doc/build/orm/queryguide.rst @@ -1003,32 +1003,62 @@ Yield Per ^^^^^^^^^ The ``yield_per`` execution option is an integer value which will cause the -:class:`_engine.Result` to yield only a fixed count of rows at a time. It is -often useful to use with a result partitioning method such as -:meth:`_engine.Result.partitions`, e.g.:: +:class:`_engine.Result` to yield only a fixed count of rows at a time. +When used as an execution option, ``yield_per`` is equivalent to making use +of both the :paramref:`_engine.Connection.execution_options.stream_results` +execution option, which selects for server side cursors to be used +by the backend if supported, and the :meth:`_engine.Result.yield_per` method +on the returned :class:`_engine.Result` object, +which establishes a fixed size of rows to be fetched as well as a +corresponding limit to how many ORM objects will be constructed at once. + +.. tip:: + + ``yield_per`` is now available as a Core execution option as well, + described in detail at :ref:`engine_stream_results`. 
This section details + the use of ``yield_per`` as an execution option with an ORM + :class:`_orm.Session`. The option behaves as similarly as possible + in both contexts. + +``yield_per`` when used with the ORM is typically established either +via the :meth:`.Executable.execution_options` method on the given statement +or by passing it to the :paramref:`_orm.Session.execute.execution_options` +parameter of :meth:`_orm.Session.execute` or other similar :class:`_orm.Session` +method. In the example below its invoked upon a statement:: >>> stmt = select(User).execution_options(yield_per=10) - {sql}>>> for partition in session.execute(stmt).partitions(10): - ... for row in partition: - ... print(row) + {sql}>>> for row in session.execute(stmt): + ... print(row) + SELECT user_account.id, user_account.name, user_account.fullname + FROM user_account + [...] (){stop} + (User(id=1, name='spongebob', fullname='Spongebob Squarepants'),) + ... + +The above code is mostly equivalent as making use of the +:paramref:`_engine.Connection.execution_options.stream_results` execution +option, setting the :paramref:`_engine.Connection.execution_options.max_row_buffer` +to the given integer size, and then using the :meth:`_engine.Result.yield_per` +method on the :class:`_engine.Result` returned by the +:class:`_orm.Session`, as in the following example:: + + # equivalent code + >>> stmt = select(User).execution_options(stream_results=True, max_row_buffer=10) + {sql}>>> for row in session.execute(stmt).yield_per(10): + ... print(row) SELECT user_account.id, user_account.name, user_account.fullname FROM user_account [...] (){stop} (User(id=1, name='spongebob', fullname='Spongebob Squarepants'),) ... 
-For expediency, the :meth:`_engine.Result.yield_per` method may also be used -with an ORM-enabled result set, which will have the similar effect at result -fetching time as if the ``yield_per`` execution option were used, with the -exception that ``stream_results`` option, described below, is not set -automatically. The :meth:`_engine.Result.partitions` method, if used, -automatically uses the number sent to :meth:`_engine.Result.yield_per` as the -number of rows in each partition:: - - >>> stmt = select(User) - {sql} >>> for partition in session.execute( - ... stmt, execution_options={"stream_results": True} - ... ).yield_per(10).partitions(): +``yield_per`` is also commonly used in combination with the +:meth:`_engine.Result.partitions` method, that will iterate rows in grouped +partitions. The size of each partition defaults to the integer value passed to +``yield_per``, as in the below example:: + + >>> stmt = select(User).execution_options(yield_per=10) + {sql}>>> for partition in session.execute(stmt).partitions(): ... for row in partition: ... print(row) SELECT user_account.id, user_account.name, user_account.fullname @@ -1041,20 +1071,17 @@ The purpose of "yield per" is when fetching very large result sets (> 10K rows), to batch results in sub-collections and yield them out partially, so that the Python interpreter doesn't need to declare very large areas of memory which is both time consuming and leads -to excessive memory use. The performance from fetching hundreds of -thousands of rows can often double when a suitable yield-per setting -(e.g. approximately 1000) is used, even with DBAPIs that buffer -rows (which are most). +to excessive memory use. When ``yield_per`` is used, the :paramref:`_engine.Connection.execution_options.stream_results` option is also set for the Core execution, so that a streaming / server side cursor will be -used if the backend supports it [1]_ +used if the backend supports it. 
The ``yield_per`` execution option **is not compatible with subqueryload eager loading or joinedload eager loading when using collections**. It is -potentially compatible with selectinload eager loading, **provided the database -driver supports multiple, independent cursors** [2]_ . +potentially compatible with selectinload eager loading, provided the database +driver supports multiple, independent cursors. Additionally, the ``yield_per`` execution option is not compatible with the :meth:`_engine.Result.unique` method; as this method relies upon @@ -1067,20 +1094,10 @@ large number of rows. :meth:`_engine.Result.unique` filter, at the same time as the ``yield_per`` execution option is used. -The ``yield_per`` execution option is equvialent to the -:meth:`_orm.Query.yield_per` method in :term:`1.x style` ORM queries. - -.. [1] currently known are - :mod:`_postgresql.psycopg2`, - :mod:`_mysql.mysqldb` and - :mod:`_mysql.pymysql`. Other backends will pre buffer - all rows. The memory use of raw database rows is much less than that of an - ORM-mapped object, but should still be taken into consideration when - benchmarking. +When using the legacy :class:`_orm.Query` object with +:term:`1.x style` ORM use, the :meth:`_orm.Query.yield_per` method +will have the same result as that of the ``yield_per`` execution option. -.. [2] the :mod:`_postgresql.psycopg2` - and :mod:`_sqlite.pysqlite` drivers are - known to work, drivers for MySQL and SQL Server ODBC drivers do not. .. 
seealso:: diff --git a/lib/sqlalchemy/engine/__init__.py b/lib/sqlalchemy/engine/__init__.py index 488e41de33b..2437e170dfa 100644 --- a/lib/sqlalchemy/engine/__init__.py +++ b/lib/sqlalchemy/engine/__init__.py @@ -44,6 +44,7 @@ from .mock import create_mock_engine from .reflection import Inspector from .result import ChunkedIteratorResult +from .result import FilterResult from .result import FrozenResult from .result import IteratorResult from .result import MappingResult diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index 1507e159ed6..8a8cab140b0 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -364,15 +364,89 @@ def execution_options(self, **opt): :param stream_results: Available on: Connection, statement. Indicate to the dialect that results should be - "streamed" and not pre-buffered, if possible. This is a limitation - of many DBAPIs. The flag is currently understood within a subset - of dialects within the PostgreSQL and MySQL categories, and - may be supported by other third party dialects as well. + "streamed" and not pre-buffered, if possible. For backends + such as PostgreSQL, MySQL and MariaDB, this indicates the use of + a "server side cursor" as opposed to a client side cursor. + Other backends such as that of Oracle may already use server + side cursors by default. + + The usage of + :paramref:`_engine.Connection.execution_options.stream_results` is + usually combined with setting a fixed number of rows to to be fetched + in batches, to allow for efficient iteration of database rows while + at the same time not loading all result rows into memory at once; + this can be configured on a :class:`_engine.Result` object using the + :meth:`_engine.Result.yield_per` method, after execution has + returned a new :class:`_engine.Result`. 
If + :meth:`_engine.Result.yield_per` is not used, + the :paramref:`_engine.Connection.execution_options.stream_results` + mode of operation will instead use a dynamically sized buffer + which buffers sets of rows at a time, growing on each batch + based on a fixed growth size up until a limit which may + be configured using the + :paramref:`_engine.Connection.execution_options.max_row_buffer` + parameter. + + When using the ORM to fetch ORM mapped objects from a result, + :meth:`_engine.Result.yield_per` should always be used with + :paramref:`_engine.Connection.execution_options.stream_results`, + so that the ORM does not fetch all rows into new ORM objects at once. + + For typical use, the + :paramref:`_engine.Connection.execution_options.yield_per` execution + option should be preferred, which sets up both + :paramref:`_engine.Connection.execution_options.stream_results` and + :meth:`_engine.Result.yield_per` at once. This option is supported + both at a core level by :class:`_engine.Connection` as well as by the + ORM :class:`_engine.Session`; the latter is described at + :ref:`orm_queryguide_yield_per`. .. seealso:: + :ref:`engine_stream_results` - background on + :paramref:`_engine.Connection.execution_options.stream_results` + + :paramref:`_engine.Connection.execution_options.max_row_buffer` + + :paramref:`_engine.Connection.execution_options.yield_per` + + :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` + describing the ORM version of ``yield_per`` + + :param max_row_buffer: Available on: :class:`_engine.Connection`, + :class:`_sql.Executable`. Sets a maximum + buffer size to use when the + :paramref:`_engine.Connection.execution_options.stream_results` + execution option is used on a backend that supports server side + cursors. The default value if not specified is 1000. + + .. 
seealso:: + + :paramref:`_engine.Connection.execution_options.stream_results` + :ref:`engine_stream_results` + + :param yield_per: Available on: :class:`_engine.Connection`, + :class:`_sql.Executable`. Integer value applied which will + set the :paramref:`_engine.Connection.execution_options.stream_results` + execution option and invoke :meth:`_engine.Result.yield_per` + automatically at once. Allows equivalent functionality as + is present when using this parameter with the ORM. + + .. versionadded:: 1.4.40 + + .. seealso:: + + :ref:`engine_stream_results` - background and examples + on using server side cursors with Core. + + :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` + describing the ORM version of ``yield_per`` + + :param schema_translate_map: Available on: :class:`_engine.Connection`, + :class:`_engine.Engine`, :class:`_sql.Executable`. + :param schema_translate_map: Available on: Connection, Engine. A dictionary mapping schema names to schema names, that will be applied to the :paramref:`_schema.Table.schema` element of each @@ -1711,6 +1785,13 @@ def _execute_context( # the only feature that branching provides self = self.__branch_from + if execution_options: + yp = execution_options.get("yield_per", None) + if yp: + execution_options = execution_options.union( + {"stream_results": True, "max_row_buffer": yp} + ) + try: conn = self._dbapi_connection if conn is None: diff --git a/lib/sqlalchemy/engine/cursor.py b/lib/sqlalchemy/engine/cursor.py index e17422e1c31..abe58e2fde1 100644 --- a/lib/sqlalchemy/engine/cursor.py +++ b/lib/sqlalchemy/engine/cursor.py @@ -1021,7 +1021,6 @@ def __init__( growth_factor=5, initial_buffer=None, ): - self._max_row_buffer = execution_options.get("max_row_buffer", 1000) if initial_buffer is not None: diff --git a/lib/sqlalchemy/engine/default.py b/lib/sqlalchemy/engine/default.py index 028c4b0713a..268a2d60930 100644 --- a/lib/sqlalchemy/engine/default.py +++ b/lib/sqlalchemy/engine/default.py @@ 
-1445,11 +1445,16 @@ def supports_sane_multi_rowcount(self): return self.dialect.supports_sane_multi_rowcount def _setup_result_proxy(self): + exec_opt = self.execution_options + if self.is_crud or self.is_text: result = self._setup_dml_or_text_result() + yp = sr = False else: + yp = exec_opt.get("yield_per", None) + sr = self._is_server_side or exec_opt.get("stream_results", False) strategy = self.cursor_fetch_strategy - if self._is_server_side and strategy is _cursor._DEFAULT_FETCH: + if sr and strategy is _cursor._DEFAULT_FETCH: strategy = _cursor.BufferedRowCursorFetchStrategy( self.cursor, self.execution_options ) @@ -1482,6 +1487,9 @@ def _setup_result_proxy(self): self._soft_closed = result._soft_closed + if yp: + result = result.yield_per(yp) + return result def _setup_out_parameters(self, result): diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py index 6ca8f8c9d9d..912dccf4bf8 100644 --- a/lib/sqlalchemy/engine/result.py +++ b/lib/sqlalchemy/engine/result.py @@ -773,7 +773,7 @@ def close(self): @_generative def yield_per(self, num): - """Configure the row-fetching strategy to fetch num rows at a time. + """Configure the row-fetching strategy to fetch ``num`` rows at a time. This impacts the underlying behavior of the result when iterating over the result object, or otherwise making use of methods such as @@ -788,16 +788,24 @@ def yield_per(self, num): conjunction with the :paramref:`_engine.Connection.execution_options.stream_results` execution option, which will allow the database dialect in use to make - use of a server side cursor, if the DBAPI supports it. + use of a server side cursor, if the DBAPI supports a specific "server + side cursor" mode separate from its default mode of operation. - Most DBAPIs do not use server side cursors by default, which means all - rows will be fetched upfront from the database regardless of the - :meth:`_engine.Result.yield_per` setting. 
However, - :meth:`_engine.Result.yield_per` may still be useful in that it batches - the SQLAlchemy-side processing of the raw data from the database, and - additionally when used for ORM scenarios will batch the conversion of - database rows into ORM entity rows. + .. tip:: + Consider using the + :paramref:`_engine.Connection.execution_options.yield_per` + execution option, which will simultaneously set + :paramref:`_engine.Connection.execution_options.stream_results` + to ensure the use of server side cursors, as well as automatically + invoke the :meth:`_engine.Result.yield_per` method to establish + a fixed row buffer size at once. + + The :paramref:`_engine.Connection.execution_options.yield_per` + execution option is available for ORM operations, with + :class:`_orm.Session`-oriented use described at + :ref:`orm_queryguide_yield_per`. The Core-only version which works + with :class:`_engine.Connection` is new as of SQLAlchemy 1.4.40. .. versionadded:: 1.4 @@ -806,9 +814,10 @@ def yield_per(self, num): .. seealso:: - :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` + :ref:`engine_stream_results` - describes Core behavior for + :meth:`_engine.Result.yield_per` - :meth:`_engine.Result.partitions` + :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` """ self._yield_per = num @@ -1005,24 +1014,29 @@ def partitions(self, size=None): When using the ORM, the :meth:`_engine.Result.partitions` method is typically more effective from a memory perspective when it is - combined with use of the :meth:`_engine.Result.yield_per` method, - which instructs the ORM loading internals to only build a certain - amount of ORM objects from a result at a time before yielding - them out. 
+ combined with use of the + :ref:`yield_per execution option `, + which instructs both the DBAPI driver to use server side cursors, + if available, as well as instructs the ORM loading internals to only + build a certain amount of ORM objects from a result at a time before + yielding them out. .. versionadded:: 1.4 :param size: indicate the maximum number of rows to be present in each list yielded. If None, makes use of the value set by - :meth:`_engine.Result.yield_per`, if present, otherwise uses the - :meth:`_engine.Result.fetchmany` default which may be backend - specific. + the :meth:`_engine.Result.yield_per`, method, if it were called, + or the :paramref:`_engine.Connection.execution_options.yield_per` + execution option, which is equivalent in this regard. If + yield_per weren't set, it makes use of the + :meth:`_engine.Result.fetchmany` default, which may be backend + specific and not well defined. :return: iterator of lists .. seealso:: - :paramref:`.Connection.execution_options.stream_results` + :ref:`engine_stream_results` :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` @@ -1283,10 +1297,35 @@ class FilterResult(ResultInternal): """A wrapper for a :class:`_engine.Result` that returns objects other than :class:`_result.Row` objects, such as dictionaries or scalar objects. + :class:`.FilterResult` is the common base for additional result + APIs including :class:`.MappingResult`, :class:`.ScalarResult` + and :class:`.AsyncResult`. + """ _post_creational_filter = None + @_generative + def yield_per(self, num): + """Configure the row-fetching strategy to fetch ``num`` rows at a time. + + The :meth:`_engine.FilterResult.yield_per` method is a pass through + to the :meth:`_engine.Result.yield_per` method. See that method's + documentation for usage notes. + + .. versionadded:: 1.4.40 - added :meth:`_engine.FilterResult.yield_per` + so that the method is available on all result set implementations + + .. 
seealso:: + + :ref:`engine_stream_results` - describes Core behavior for + :meth:`_engine.Result.yield_per` + + :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` + + """ + self._real_result = self._real_result.yield_per(num) + def _soft_close(self, hard=False): self._real_result._soft_close(hard=hard) diff --git a/lib/sqlalchemy/ext/asyncio/result.py b/lib/sqlalchemy/ext/asyncio/result.py index 62e4a9a0e54..c69fe191bec 100644 --- a/lib/sqlalchemy/ext/asyncio/result.py +++ b/lib/sqlalchemy/ext/asyncio/result.py @@ -12,6 +12,7 @@ from ...engine.result import FilterResult from ...engine.result import FrozenResult from ...engine.result import MergedResult +from ...sql.base import _generative from ...util.concurrency import greenlet_spawn @@ -63,6 +64,7 @@ def keys(self): """ return self._metadata.keys + @_generative def unique(self, strategy=None): """Apply unique filtering to the objects returned by this :class:`_asyncio.AsyncResult`. @@ -73,7 +75,6 @@ def unique(self, strategy=None): """ self._unique_filter_state = (set(), strategy) - return self def columns(self, *col_expressions): r"""Establish the columns that should be returned in each row. 
diff --git a/lib/sqlalchemy/orm/context.py b/lib/sqlalchemy/orm/context.py index ab1fc4045b8..7cedc2b43cb 100644 --- a/lib/sqlalchemy/orm/context.py +++ b/lib/sqlalchemy/orm/context.py @@ -286,14 +286,9 @@ def orm_pre_session_exec( else: execution_options = execution_options.union(_orm_load_exec_options) - if "yield_per" in execution_options or load_options._yield_per: + if load_options._yield_per: execution_options = execution_options.union( - { - "stream_results": True, - "max_row_buffer": execution_options.get( - "yield_per", load_options._yield_per - ), - } + {"yield_per": load_options._yield_per} ) bind_arguments["clause"] = statement diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index 0ab39120658..99e45914319 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -850,6 +850,10 @@ def yield_per(self, count): level. See the section :ref:`orm_queryguide_yield_per` for further background on this option. + .. seealso:: + + :ref:`orm_queryguide_yield_per` + """ self.load_options += {"_yield_per": count} diff --git a/lib/sqlalchemy/testing/fixtures.py b/lib/sqlalchemy/testing/fixtures.py index ec7f2de4b45..0a2d63b5480 100644 --- a/lib/sqlalchemy/testing/fixtures.py +++ b/lib/sqlalchemy/testing/fixtures.py @@ -85,6 +85,28 @@ def connection(self): # run a close all connections. 
conn.close() + @config.fixture() + def close_result_when_finished(self): + to_close = [] + to_consume = [] + + def go(result, consume=False): + to_close.append(result) + if consume: + to_consume.append(result) + + yield go + for r in to_consume: + try: + r.all() + except: + pass + for r in to_close: + try: + r.close() + except: + pass + @config.fixture() def registry(self, metadata): reg = registry(metadata=metadata) diff --git a/test/ext/asyncio/test_engine_py3k.py b/test/ext/asyncio/test_engine_py3k.py index d8d9e702113..eddf4e52fc3 100644 --- a/test/ext/asyncio/test_engine_py3k.py +++ b/test/ext/asyncio/test_engine_py3k.py @@ -15,6 +15,7 @@ from sqlalchemy import testing from sqlalchemy import text from sqlalchemy import union_all +from sqlalchemy.engine import cursor as _cursor from sqlalchemy.ext.asyncio import async_engine_from_config from sqlalchemy.ext.asyncio import create_async_engine from sqlalchemy.ext.asyncio import engine as _async_engine @@ -873,20 +874,53 @@ async def test_columns_all(self, async_engine): @testing.combinations( (None,), ("scalars",), ("mappings",), argnames="filter_" ) + @testing.combinations(None, 2, 5, 10, argnames="yield_per") + @testing.combinations("method", "opt", argnames="yield_per_type") @async_test - async def test_partitions(self, async_engine, filter_): + async def test_partitions( + self, async_engine, filter_, yield_per, yield_per_type + ): users = self.tables.users async with async_engine.connect() as conn: - result = await conn.stream(select(users)) + stmt = select(users) + if yield_per and yield_per_type == "opt": + stmt = stmt.execution_options(yield_per=yield_per) + result = await conn.stream(stmt) if filter_ == "mappings": result = result.mappings() elif filter_ == "scalars": result = result.scalars(1) + if yield_per and yield_per_type == "method": + result = result.yield_per(yield_per) + check_result = [] - async for partition in result.partitions(5): - check_result.append(partition) + + # stream() sets 
stream_results unconditionally + assert isinstance( + result._real_result.cursor_strategy, + _cursor.BufferedRowCursorFetchStrategy, + ) + + if yield_per: + partition_size = yield_per + + eq_(result._real_result.cursor_strategy._bufsize, yield_per) + + async for partition in result.partitions(): + check_result.append(partition) + else: + eq_(result._real_result.cursor_strategy._bufsize, 5) + + partition_size = 5 + async for partition in result.partitions(partition_size): + check_result.append(partition) + + ranges = [ + (i, min(20, i + partition_size)) + for i in range(1, 21, partition_size) + ] if filter_ == "mappings": eq_( @@ -896,23 +930,20 @@ async def test_partitions(self, async_engine, filter_): {"user_id": i, "user_name": "name%d" % i} for i in range(a, b) ] - for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)] + for (a, b) in ranges ], ) elif filter_ == "scalars": eq_( check_result, - [ - ["name%d" % i for i in range(a, b)] - for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)] - ], + [["name%d" % i for i in range(a, b)] for (a, b) in ranges], ) else: eq_( check_result, [ [(i, "name%d" % i) for i in range(a, b)] - for (a, b) in [(1, 6), (6, 11), (11, 16), (16, 20)] + for (a, b) in ranges ], ) diff --git a/test/orm/test_query.py b/test/orm/test_query.py index 0539e6fe658..ddaa3c60dab 100644 --- a/test/orm/test_query.py +++ b/test/orm/test_query.py @@ -38,6 +38,7 @@ from sqlalchemy import Unicode from sqlalchemy import union from sqlalchemy import util +from sqlalchemy.engine import cursor as _cursor from sqlalchemy.engine import default from sqlalchemy.ext.compiler import compiles from sqlalchemy.orm import aliased @@ -5406,8 +5407,7 @@ def check(ctx): if not k.startswith("_") }, { - "max_row_buffer": 15, - "stream_results": True, + "yield_per": 15, "foo": "bar", "future_result": True, }, @@ -5435,8 +5435,6 @@ def check(ctx): if not k.startswith("_") }, { - "max_row_buffer": 15, - "stream_results": True, "yield_per": 15, "future_result": True, }, @@ -5444,6 
+5442,12 @@ def check(ctx): stmt = select(User).execution_options(yield_per=15) result = sess.execute(stmt) + + assert isinstance( + result.raw.cursor_strategy, _cursor.BufferedRowCursorFetchStrategy + ) + eq_(result.raw.cursor_strategy._max_row_buffer, 15) + eq_(len(result.all()), 4) def test_no_joinedload_opt(self): diff --git a/test/sql/test_resultset.py b/test/sql/test_resultset.py index 088f5807474..13ffc5eebdf 100644 --- a/test/sql/test_resultset.py +++ b/test/sql/test_resultset.py @@ -97,6 +97,12 @@ def define_tables(cls, metadata): Column("user_name", VARCHAR(20)), test_needs_acid=True, ) + Table( + "test", + metadata, + Column("x", Integer, primary_key=True), + Column("y", String(50)), + ) def test_row_iteration(self, connection): users = self.tables.users @@ -1766,6 +1772,131 @@ def __getattr__(self, name): with expect_raises_message(Exception, "canary"): r.lastrowid + @testing.combinations("plain", "mapping", "scalar", argnames="result_type") + @testing.combinations( + "stream_results", "yield_per", "yield_per_meth", argnames="optname" + ) + @testing.combinations(10, 50, argnames="value") + @testing.combinations("meth", "stmt", argnames="send_opts_how") + def test_stream_options( + self, + connection, + optname, + value, + send_opts_how, + result_type, + close_result_when_finished, + ): + table = self.tables.test + + connection.execute( + table.insert(), + [{"x": i, "y": "t_%d" % i} for i in range(15, 3000)], + ) + + if optname == "stream_results": + opts = {"stream_results": True, "max_row_buffer": value} + elif optname == "yield_per": + opts = {"yield_per": value} + elif optname == "yield_per_meth": + opts = {"stream_results": True} + else: + assert False + + if send_opts_how == "meth": + result = connection.execution_options(**opts).execute( + table.select() + ) + elif send_opts_how == "stmt": + result = connection.execute( + table.select().execution_options(**opts) + ) + else: + assert False + + if result_type == "mapping": + result = 
result.mappings() + real_result = result._real_result + elif result_type == "scalar": + result = result.scalars() + real_result = result._real_result + else: + real_result = result + + if optname == "yield_per_meth": + result = result.yield_per(value) + + if result_type == "mapping" or result_type == "scalar": + real_result = result._real_result + else: + real_result = result + + close_result_when_finished(result, consume=True) + + if optname == "yield_per" and value is not None: + expected_opt = { + "stream_results": True, + "max_row_buffer": value, + "yield_per": value, + } + elif optname == "stream_results" and value is not None: + expected_opt = { + "stream_results": True, + "max_row_buffer": value, + } + else: + expected_opt = None + + if expected_opt is not None: + eq_(real_result.context.execution_options, expected_opt) + + if value is None: + assert isinstance( + real_result.cursor_strategy, _cursor.CursorFetchStrategy + ) + return + + assert isinstance( + real_result.cursor_strategy, _cursor.BufferedRowCursorFetchStrategy + ) + eq_(real_result.cursor_strategy._max_row_buffer, value) + + if optname == "yield_per" or optname == "yield_per_meth": + eq_(real_result.cursor_strategy._bufsize, value) + else: + eq_(real_result.cursor_strategy._bufsize, min(value, 5)) + eq_(len(real_result.cursor_strategy._rowbuffer), 1) + + next(result) + next(result) + + if optname == "yield_per" or optname == "yield_per_meth": + eq_(len(real_result.cursor_strategy._rowbuffer), value - 1) + else: + # based on default growth of 5 + eq_(len(real_result.cursor_strategy._rowbuffer), 4) + + for i, row in enumerate(result): + if i == 186: + break + + if optname == "yield_per" or optname == "yield_per_meth": + eq_( + len(real_result.cursor_strategy._rowbuffer), + value - (188 % value), + ) + else: + # based on default growth of 5 + eq_( + len(real_result.cursor_strategy._rowbuffer), + 7 if value == 10 else 42, + ) + + if optname == "yield_per" or optname == "yield_per_meth": + # ensure 
partition is set up to same size + partition = next(result.partitions()) + eq_(len(partition), value) + class KeyTargetingTest(fixtures.TablesTest): run_inserts = "once" From 94ebf90449206d8d1c9a13746cf3e107fa65c4d1 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 1 Jul 2022 14:35:27 -0400 Subject: [PATCH 296/632] clarify eager loading terminology Change-Id: Idb27ec5f09ef958d71738e1095b4be7a1377eecb (cherry picked from commit 4b5ed6ad27b3b63ecf1b082653e2e3ee0abb887f) --- doc/build/orm/queryguide.rst | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/doc/build/orm/queryguide.rst b/doc/build/orm/queryguide.rst index 06d3dace903..747a70abbf9 100644 --- a/doc/build/orm/queryguide.rst +++ b/doc/build/orm/queryguide.rst @@ -1078,10 +1078,12 @@ When ``yield_per`` is used, the set for the Core execution, so that a streaming / server side cursor will be used if the backend supports it. -The ``yield_per`` execution option **is not compatible with subqueryload eager -loading or joinedload eager loading when using collections**. It is -potentially compatible with selectinload eager loading, provided the database -driver supports multiple, independent cursors. +The ``yield_per`` execution option **is not compatible** with +:ref:`"subquery" eager loading ` loading or +:ref:`"joined" eager loading ` when using collections. It +is potentially compatible with :ref:`"select in" eager loading +` , provided the database driver supports multiple, +independent cursors. 
Additionally, the ``yield_per`` execution option is not compatible with the :meth:`_engine.Result.unique` method; as this method relies upon From 80580991ecebeb173df1e8dcf716508309f60883 Mon Sep 17 00:00:00 2001 From: Cyril Chapellier Date: Thu, 30 Jun 2022 14:59:38 +0200 Subject: [PATCH 297/632] Support lambda expression in mypy plugin Avoid `error: INTERNAL ERROR` when the default is a lambda Fixes: #8196 Change-Id: I7346c693519b024c56156db6f4ffc9a45bb748d3 (cherry picked from commit 472dc0735c4d9385b05e5e2088dad9f507a59a63) --- doc/build/changelog/unreleased_14/8196.rst | 7 +++++++ lib/sqlalchemy/ext/mypy/infer.py | 5 +++++ test/ext/mypy/files/lambda_default.py | 11 +++++++++++ 3 files changed, 23 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/8196.rst create mode 100644 test/ext/mypy/files/lambda_default.py diff --git a/doc/build/changelog/unreleased_14/8196.rst b/doc/build/changelog/unreleased_14/8196.rst new file mode 100644 index 00000000000..d5afbb8f7a9 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8196.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: bug, mypy + :tickets: 8196 + + Fixed a crash of the mypy plugin when using a lambda as a Column + default. Pull request curtesy of tchapi. 
+ diff --git a/lib/sqlalchemy/ext/mypy/infer.py b/lib/sqlalchemy/ext/mypy/infer.py index 3cd946e04d0..f88a960bd2e 100644 --- a/lib/sqlalchemy/ext/mypy/infer.py +++ b/lib/sqlalchemy/ext/mypy/infer.py @@ -14,6 +14,7 @@ from mypy.nodes import CallExpr from mypy.nodes import Expression from mypy.nodes import FuncDef +from mypy.nodes import LambdaExpr from mypy.nodes import MemberExpr from mypy.nodes import NameExpr from mypy.nodes import RefExpr @@ -387,6 +388,10 @@ class MyClass: elif isinstance(column_arg, (StrExpr,)): # x = Column("name", String), go to next argument continue + elif isinstance(column_arg, (LambdaExpr,)): + # x = Column("name", String, default=lambda: uuid.uuid4()) + # go to next argument + continue else: assert False diff --git a/test/ext/mypy/files/lambda_default.py b/test/ext/mypy/files/lambda_default.py new file mode 100644 index 00000000000..a1019f0d02f --- /dev/null +++ b/test/ext/mypy/files/lambda_default.py @@ -0,0 +1,11 @@ +import uuid + +from sqlalchemy import Column +from sqlalchemy import String +from sqlalchemy.orm import declarative_base + +Base = declarative_base() + + +class MyClass(Base): + id = Column(String, default=lambda: uuid.uuid4(), primary_key=True) From 12c55290a3080f22721e4ad8d60f9742e70c04aa Mon Sep 17 00:00:00 2001 From: Naveen <172697+naveensrinivasan@users.noreply.github.com> Date: Sat, 2 Jul 2022 16:58:16 -0500 Subject: [PATCH 298/632] chore: Set permissions for GitHub actions (#8117) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Restrict the GitHub token permissions only to the required ones; this way, even if the attackers will succeed in compromising your workflow, they won’t be able to do much. - Included permissions for the action. 
https://github.com/ossf/scorecard/blob/main/docs/checks.md#token-permissions https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions https://docs.github.com/en/actions/using-jobs/assigning-permissions-to-jobs [Keeping your GitHub Actions and workflows secure Part 1: Preventing pwn requests](https://securitylab.github.com/research/github-actions-preventing-pwn-requests/) Signed-off-by: naveen <172697+naveensrinivasan@users.noreply.github.com> (cherry picked from commit a8af28c99431fb62243f025cdea18099dde0c844) --- .github/workflows/run-on-pr.yaml | 3 +++ .github/workflows/run-test.yaml | 3 +++ 2 files changed, 6 insertions(+) diff --git a/.github/workflows/run-on-pr.yaml b/.github/workflows/run-on-pr.yaml index 2a04a1f0485..352eec3abbf 100644 --- a/.github/workflows/run-on-pr.yaml +++ b/.github/workflows/run-on-pr.yaml @@ -12,6 +12,9 @@ env: # global env to all steps TOX_WORKERS: -n2 +permissions: + contents: read + jobs: run-test-amd64: name: ${{ matrix.python-version }}-${{ matrix.build-type }}-${{ matrix.architecture }}-${{ matrix.os }} diff --git a/.github/workflows/run-test.yaml b/.github/workflows/run-test.yaml index 81b6799e1b2..36dfce250d2 100644 --- a/.github/workflows/run-test.yaml +++ b/.github/workflows/run-test.yaml @@ -16,6 +16,9 @@ env: # global env to all steps TOX_WORKERS: -n2 +permissions: + contents: read + jobs: run-test: name: ${{ matrix.python-version }}-${{ matrix.build-type }}-${{ matrix.architecture }}-${{ matrix.os }} From eef077d1a6ba711a30b9706982862d9120fa063b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 3 Jul 2022 11:01:25 -0400 Subject: [PATCH 299/632] dont use import * in any docs, ever (cherry picked from commit 9ab7b9a624cf5dfed93e0927eb4b3b62fe87e5ae) --- doc/build/core/metadata.rst | 2 +- doc/build/core/type_basics.rst | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/doc/build/core/metadata.rst b/doc/build/core/metadata.rst index 
5c6fa2e5cbf..d6a8f72dde1 100644 --- a/doc/build/core/metadata.rst +++ b/doc/build/core/metadata.rst @@ -16,7 +16,7 @@ and :class:`_schema.MetaData` objects. A collection of metadata entities is stored in an object aptly named :class:`~sqlalchemy.schema.MetaData`:: - from sqlalchemy import * + from sqlalchemy import MetaData metadata_obj = MetaData() diff --git a/doc/build/core/type_basics.rst b/doc/build/core/type_basics.rst index 3ec50cc0039..069214f99b9 100644 --- a/doc/build/core/type_basics.rst +++ b/doc/build/core/type_basics.rst @@ -209,17 +209,17 @@ Or some PostgreSQL types:: Column('elements', postgresql.ARRAY(String)) ) -Each dialect provides the full set of typenames supported by -that backend within its `__all__` collection, so that a simple -`import *` or similar will import all supported types as -implemented for that backend:: +Each dialect provides the full set of database types supported by +that backend within its own module, so they may all be used +against the module directly without the need to differentiate between +which types are specific to that backend or not:: - from sqlalchemy.dialects.postgresql import * + from sqlalchemy.dialects import postgresql t = Table('mytable', metadata, - Column('id', INTEGER, primary_key=True), - Column('name', VARCHAR(300)), - Column('inetaddr', INET) + Column('id', postgresql.INTEGER, primary_key=True), + Column('name', postgresql.VARCHAR(300)), + Column('inetaddr', postgresql.INET) ) Where above, the INTEGER and VARCHAR types are ultimately from From baff8d67bb286fa934d5b34c222335d19484564e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 3 Jul 2022 11:09:16 -0400 Subject: [PATCH 300/632] fix formatting problems Change-Id: Ib55fe1c60130a45bfbf28de5c74cfe7a30418bb3 (cherry picked from commit 4b3f204d07d53ae09b59ce8f33b534f26a605cd4) --- doc/build/core/metadata.rst | 75 +++++++++++++++++++----------------- lib/sqlalchemy/sql/schema.py | 9 +++-- 2 files changed, 45 insertions(+), 39 deletions(-) diff --git 
a/doc/build/core/metadata.rst b/doc/build/core/metadata.rst index d6a8f72dde1..fa1872d68d0 100644 --- a/doc/build/core/metadata.rst +++ b/doc/build/core/metadata.rst @@ -13,6 +13,11 @@ Describing Databases with MetaData This section discusses the fundamental :class:`_schema.Table`, :class:`_schema.Column` and :class:`_schema.MetaData` objects. +.. seealso:: + + :ref:`tutorial_working_with_metadata` - tutorial introduction to + SQLAlchemy's database metadata concept in the :ref:`unified_tutorial` + A collection of metadata entities is stored in an object aptly named :class:`~sqlalchemy.schema.MetaData`:: @@ -160,41 +165,41 @@ The usual way to issue CREATE is to use that first check for the existence of each individual table, and if not found will issue the CREATE statements: - .. sourcecode:: python+sql - - engine = create_engine('sqlite:///:memory:') - - metadata_obj = MetaData() - - user = Table('user', metadata_obj, - Column('user_id', Integer, primary_key=True), - Column('user_name', String(16), nullable=False), - Column('email_address', String(60), key='email'), - Column('nickname', String(50), nullable=False) - ) - - user_prefs = Table('user_prefs', metadata_obj, - Column('pref_id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False), - Column('pref_name', String(40), nullable=False), - Column('pref_value', String(100)) - ) - - {sql}metadata_obj.create_all(engine) - PRAGMA table_info(user){} - CREATE TABLE user( - user_id INTEGER NOT NULL PRIMARY KEY, - user_name VARCHAR(16) NOT NULL, - email_address VARCHAR(60), - nickname VARCHAR(50) NOT NULL - ) - PRAGMA table_info(user_prefs){} - CREATE TABLE user_prefs( - pref_id INTEGER NOT NULL PRIMARY KEY, - user_id INTEGER NOT NULL REFERENCES user(user_id), - pref_name VARCHAR(40) NOT NULL, - pref_value VARCHAR(100) - ) +.. 
sourcecode:: python+sql + + engine = create_engine('sqlite:///:memory:') + + metadata_obj = MetaData() + + user = Table('user', metadata_obj, + Column('user_id', Integer, primary_key=True), + Column('user_name', String(16), nullable=False), + Column('email_address', String(60), key='email'), + Column('nickname', String(50), nullable=False) + ) + + user_prefs = Table('user_prefs', metadata_obj, + Column('pref_id', Integer, primary_key=True), + Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False), + Column('pref_name', String(40), nullable=False), + Column('pref_value', String(100)) + ) + + {sql}metadata_obj.create_all(engine) + PRAGMA table_info(user){} + CREATE TABLE user( + user_id INTEGER NOT NULL PRIMARY KEY, + user_name VARCHAR(16) NOT NULL, + email_address VARCHAR(60), + nickname VARCHAR(50) NOT NULL + ) + PRAGMA table_info(user_prefs){} + CREATE TABLE user_prefs( + pref_id INTEGER NOT NULL PRIMARY KEY, + user_id INTEGER NOT NULL REFERENCES user(user_id), + pref_name VARCHAR(40) NOT NULL, + pref_value VARCHAR(100) + ) :func:`~sqlalchemy.schema.MetaData.create_all` creates foreign key constraints between tables usually inline with the table definition itself, and for this diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py index e58f4f0882c..dde665cbde7 100644 --- a/lib/sqlalchemy/sql/schema.py +++ b/lib/sqlalchemy/sql/schema.py @@ -172,10 +172,11 @@ class Table(DialectKWArgs, SchemaItem, TableClause): e.g.:: - mytable = Table("mytable", metadata, - Column('mytable_id', Integer, primary_key=True), - Column('value', String(50)) - ) + mytable = Table( + "mytable", metadata, + Column('mytable_id', Integer, primary_key=True), + Column('value', String(50)) + ) The :class:`_schema.Table` object constructs a unique instance of itself based From 78fa5961bc37689209df41f5512ecb9cf31bc2e2 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 4 Jul 2022 13:05:37 -0400 Subject: [PATCH 301/632] move backref to "legacy" in the interests 
of consistency as well as new typing features, backref should be considered legacy and is fully superseded by back_populates. this commit is for 2.0 /1.4, in 2.0 further updates will be made for new ORM syntaxes. Change-Id: Idd3b7a3b07843b73304df69e476dc4239c60b3f8 (cherry picked from commit d49dfa74e86778eb5c581470405131ed9f9d0206) --- doc/build/orm/backref.rst | 298 +++++++--------------------- doc/build/orm/cascades.rst | 2 + doc/build/orm/relationships.rst | 2 +- lib/sqlalchemy/orm/__init__.py | 16 +- lib/sqlalchemy/orm/relationships.py | 56 +++--- 5 files changed, 119 insertions(+), 255 deletions(-) diff --git a/doc/build/orm/backref.rst b/doc/build/orm/backref.rst index f52b868f8d9..edc87cd19dd 100644 --- a/doc/build/orm/backref.rst +++ b/doc/build/orm/backref.rst @@ -1,11 +1,43 @@ .. _relationships_backref: -Linking Relationships with Backref ----------------------------------- +Using the legacy 'backref' relationship parameter +-------------------------------------------------- + +.. note:: The :paramref:`_orm.relationship.backref` keyword should be considered + legacy, and use of :paramref:`_orm.relationship.back_populates` with explicit + :func:`_orm.relationship` constructs should be preferred. Using + individual :func:`_orm.relationship` constructs provides advantages + including that both ORM mapped classes will include their attributes + up front as the class is constructed, rather than as a deferred step, + and configuration is more straightforward as all arguments are explicit. + New :pep:`484` features in SQLAlchemy 2.0 also take advantage of + attributes being explicitly present in source code rather than + using dynamic attribute generation. -The :paramref:`_orm.relationship.backref` keyword argument was first introduced in :ref:`ormtutorial_toplevel`, and has been -mentioned throughout many of the examples here. What does it actually do ? Let's start -with the canonical ``User`` and ``Address`` scenario:: +.. 
seealso::
+
+    For general information about bidirectional relationships, see the
+    following sections:
+
+    :ref:`tutorial_orm_related_objects` - in the :ref:`unified_tutorial`,
+    presents an overview of bi-directional relationship configuration
+    and behaviors using :paramref:`_orm.relationship.back_populates`
+
+    :ref:`back_populates_cascade` - notes on bi-directional :func:`_orm.relationship`
+    behavior regarding :class:`_orm.Session` cascade behaviors.
+
+    :paramref:`_orm.relationship.back_populates`
+
+
+The :paramref:`_orm.relationship.backref` keyword argument on the
+:func:`_orm.relationship` construct allows the
+automatic generation of a new :func:`_orm.relationship` that will automatically
+be added to the ORM mapping for the related class.  It will then be
+placed into a :paramref:`_orm.relationship.back_populates` configuration
+against the current :func:`_orm.relationship` being configured, with both
+:func:`_orm.relationship` constructs referring to each other.
+
+Starting with the following example::

     from sqlalchemy import Column, ForeignKey, Integer, String
     from sqlalchemy.orm import declarative_base, relationship

@@ -29,12 +61,8 @@ with the canonical ``User`` and ``Address`` scenario::
 The above configuration establishes a collection of ``Address`` objects on
 ``User`` called ``User.addresses``.   It also establishes a ``.user`` attribute on
 ``Address`` which will
-refer to the parent ``User`` object.
-
-In fact, the :paramref:`_orm.relationship.backref` keyword is only a common shortcut for placing a second
-:func:`_orm.relationship` onto the ``Address`` mapping, including the establishment
-of an event listener on both sides which will mirror attribute operations
-in both directions. The above configuration is equivalent to::
+refer to the parent ``User`` object. 
Using :paramref:`_orm.relationship.back_populates` +it's equivalent to the following:: from sqlalchemy import Column, ForeignKey, Integer, String from sqlalchemy.orm import declarative_base, relationship @@ -58,68 +86,23 @@ in both directions. The above configuration is equivalent to:: user = relationship("User", back_populates="addresses") -Above, we add a ``.user`` relationship to ``Address`` explicitly. On -both relationships, the :paramref:`_orm.relationship.back_populates` directive tells each relationship -about the other one, indicating that they should establish "bidirectional" -behavior between each other. The primary effect of this configuration -is that the relationship adds event handlers to both attributes -which have the behavior of "when an append or set event occurs here, set ourselves -onto the incoming attribute using this particular attribute name". -The behavior is illustrated as follows. Start with a ``User`` and an ``Address`` -instance. The ``.addresses`` collection is empty, and the ``.user`` attribute -is ``None``:: - - >>> u1 = User() - >>> a1 = Address() - >>> u1.addresses - [] - >>> print(a1.user) - None - -However, once the ``Address`` is appended to the ``u1.addresses`` collection, -both the collection and the scalar attribute have been populated:: - - >>> u1.addresses.append(a1) - >>> u1.addresses - [<__main__.Address object at 0x12a6ed0>] - >>> a1.user - <__main__.User object at 0x12a6590> - -This behavior of course works in reverse for removal operations as well, as well -as for equivalent operations on both sides. Such as -when ``.user`` is set again to ``None``, the ``Address`` object is removed -from the reverse collection:: - - >>> a1.user = None - >>> u1.addresses - [] - -The manipulation of the ``.addresses`` collection and the ``.user`` attribute -occurs entirely in Python without any interaction with the SQL database. 
-Without this behavior, the proper state would be apparent on both sides once the -data has been flushed to the database, and later reloaded after a commit or -expiration operation occurs. The :paramref:`_orm.relationship.backref`/:paramref:`_orm.relationship.back_populates` behavior has the advantage -that common bidirectional operations can reflect the correct state without requiring -a database round trip. - -Remember, when the :paramref:`_orm.relationship.backref` keyword is used on a single relationship, it's -exactly the same as if the above two relationships were created individually -using :paramref:`_orm.relationship.back_populates` on each. - -Backref Arguments -~~~~~~~~~~~~~~~~~ - -We've established that the :paramref:`_orm.relationship.backref` keyword is merely a shortcut for building -two individual :func:`_orm.relationship` constructs that refer to each other. Part of -the behavior of this shortcut is that certain configurational arguments applied to -the :func:`_orm.relationship` -will also be applied to the other direction - namely those arguments that describe -the relationship at a schema level, and are unlikely to be different in the reverse -direction. The usual case -here is a many-to-many :func:`_orm.relationship` that has a :paramref:`_orm.relationship.secondary` argument, -or a one-to-many or many-to-one which has a :paramref:`_orm.relationship.primaryjoin` argument (the -:paramref:`_orm.relationship.primaryjoin` argument is discussed in :ref:`relationship_primaryjoin`). Such -as if we limited the list of ``Address`` objects to those which start with "tony":: +The behavior of the ``User.addresses`` and ``Address.user`` relationships +is that they now behave in a **bi-directional** way, indicating that +changes on one side of the relationship impact the other. An example +and discussion of this behavior is in the :ref:`unified_tutorial` +at :ref:`tutorial_orm_related_objects`. 
+ + +Backref Default Arguments +~~~~~~~~~~~~~~~~~~~~~~~~~ + +Since :paramref:`_orm.relationship.backref` generates a whole new +:func:`_orm.relationship`, the generation process by default +will attempt to include corresponding arguments in the new +:func:`_orm.relationship` that correspond to the original arguments. +As an example, below is a :func:`_orm.relationship` that includes a +:ref:`custom join condition ` +which also includes the :paramref:`_orm.relationship.backref` keyword:: from sqlalchemy import Column, ForeignKey, Integer, String from sqlalchemy.orm import declarative_base, relationship @@ -147,8 +130,8 @@ as if we limited the list of ``Address`` objects to those which start with "tony email = Column(String) user_id = Column(Integer, ForeignKey("user.id")) -We can observe, by inspecting the resulting property, that both sides -of the relationship have this join condition applied:: +When the "backref" is generated, the :paramref:`_orm.relationship.primaryjoin` +condition is copied to the new :func:`_orm.relationship` as well:: >>> print(User.addresses.property.primaryjoin) "user".id = address.user_id AND address.email LIKE :email_1 || '%%' @@ -157,22 +140,26 @@ of the relationship have this join condition applied:: "user".id = address.user_id AND address.email LIKE :email_1 || '%%' >>> -This reuse of arguments should pretty much do the "right thing" - it -uses only arguments that are applicable, and in the case of a many-to- -many relationship, will reverse the usage of +Other arguments that are transferrable include the +:paramref:`_orm.relationship.secondary` parameter that refers to a +many-to-many association table, as well as the "join" arguments :paramref:`_orm.relationship.primaryjoin` and -:paramref:`_orm.relationship.secondaryjoin` to correspond to the other -direction (see the example in :ref:`self_referential_many_to_many` for -this). 
+:paramref:`_orm.relationship.secondaryjoin`; "backref" is smart enough to know +that these two arguments should also be "reversed" when generating +the opposite side. + +Specifying Backref Arguments +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -It's very often the case however that we'd like to specify arguments -that are specific to just the side where we happened to place the -"backref". This includes :func:`_orm.relationship` arguments like +Lots of other arguments for a "backref" are not implicit, and +include arguments like :paramref:`_orm.relationship.lazy`, :paramref:`_orm.relationship.remote_side`, :paramref:`_orm.relationship.cascade` and :paramref:`_orm.relationship.cascade_backrefs`. For this case we use -the :func:`.backref` function in place of a string:: +the :func:`.backref` function in place of a string; this will store +a specific set of arguments that will be transferred to the new +:func:`_orm.relationship` when generated:: # from sqlalchemy.orm import backref @@ -195,144 +182,3 @@ returned ``Address``. The :func:`.backref` function formatted the arguments we it into a form that is interpreted by the receiving :func:`_orm.relationship` as additional arguments to be applied to the new relationship it creates. -Setting cascade for backrefs -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -A key behavior that occurs in the 1.x series of SQLAlchemy regarding backrefs -is that :ref:`cascades ` will occur bidirectionally by -default. This basically means, if one starts with an ``User`` object -that's been persisted in the :class:`.Session`:: - - user = session.query(User).filter(User.id == 1).first() - -The above ``User`` is :term:`persistent` in the :class:`.Session`. 
It usually -is intuitive that if we create an ``Address`` object and append to the -``User.addresses`` collection, it is automatically added to the -:class:`.Session` as in the example below:: - - user = session.query(User).filter(User.id == 1).first() - address = Address(email_address='foo') - user.addresses.append(address) - -The above behavior is known as the "save update cascade" and is described -in the section :ref:`unitofwork_cascades`. - -However, if we instead created a new ``Address`` object, and associated the -``User`` object with the ``Address`` as follows:: - - address = Address(email_address='foo', user=user) - -In the above example, it is **not** as intuitive that the ``Address`` would -automatically be added to the :class:`.Session`. However, the backref behavior -of ``Address.user`` indicates that the ``Address`` object is also appended to -the ``User.addresses`` collection. This in turn initiates a **cascade** -operation which indicates that this ``Address`` should be placed into the -:class:`.Session` as a :term:`pending` object. - -Since this behavior has been identified as counter-intuitive to most people, -it can be disabled by setting :paramref:`_orm.relationship.cascade_backrefs` -to False, as in:: - - - class User(Base): - # ... - - addresses = relationship("Address", back_populates="user", cascade_backrefs=False) - -See the example in :ref:`backref_cascade` for further information. - -.. seealso:: - - :ref:`backref_cascade`. - - -One Way Backrefs -~~~~~~~~~~~~~~~~ - -An unusual case is that of the "one way backref". This is where the -"back-populating" behavior of the backref is only desirable in one -direction. An example of this is a collection which contains a -filtering :paramref:`_orm.relationship.primaryjoin` condition. We'd -like to append items to this collection as needed, and have them -populate the "parent" object on the incoming object. 
However, we'd -also like to have items that are not part of the collection, but still -have the same "parent" association - these items should never be in -the collection. - -Taking our previous example, where we established a -:paramref:`_orm.relationship.primaryjoin` that limited the collection -only to ``Address`` objects whose email address started with the word -``tony``, the usual backref behavior is that all items populate in -both directions. We wouldn't want this behavior for a case like the -following:: - - >>> u1 = User() - >>> a1 = Address(email='mary') - >>> a1.user = u1 - >>> u1.addresses - [<__main__.Address object at 0x1411910>] - -Above, the ``Address`` object that doesn't match the criterion of "starts with 'tony'" -is present in the ``addresses`` collection of ``u1``. After these objects are flushed, -the transaction committed and their attributes expired for a re-load, the ``addresses`` -collection will hit the database on next access and no longer have this ``Address`` object -present, due to the filtering condition. 
But we can do away with this unwanted side -of the "backref" behavior on the Python side by using two separate :func:`_orm.relationship` constructs, -placing :paramref:`_orm.relationship.back_populates` only on one side:: - - from sqlalchemy import Column, ForeignKey, Integer, String - from sqlalchemy.orm import declarative_base, relationship - - Base = declarative_base() - - - class User(Base): - __tablename__ = "user" - id = Column(Integer, primary_key=True) - name = Column(String) - - addresses = relationship( - "Address", - primaryjoin="and_(User.id==Address.user_id, " - "Address.email.startswith('tony'))", - back_populates="user", - ) - - - class Address(Base): - __tablename__ = "address" - id = Column(Integer, primary_key=True) - email = Column(String) - user_id = Column(Integer, ForeignKey("user.id")) - - user = relationship("User") - -With the above scenario, appending an ``Address`` object to the ``.addresses`` -collection of a ``User`` will always establish the ``.user`` attribute on that -``Address``:: - - >>> u1 = User() - >>> a1 = Address(email='tony') - >>> u1.addresses.append(a1) - >>> a1.user - <__main__.User object at 0x1411850> - -However, applying a ``User`` to the ``.user`` attribute of an ``Address``, -will not append the ``Address`` object to the collection:: - - >>> a2 = Address(email='mary') - >>> a2.user = u1 - >>> a2 in u1.addresses - False - -Of course, we've disabled some of the usefulness of -:paramref:`_orm.relationship.backref` here, in that when we do append an -``Address`` that corresponds to the criteria of -``email.startswith('tony')``, it won't show up in the -``User.addresses`` collection until the session is flushed, and the -attributes reloaded after a commit or expire operation. While we -could consider an attribute event that checks this criterion in -Python, this starts to cross the line of duplicating too much SQL -behavior in Python. 
The backref behavior itself is only a slight -transgression of this philosophy - SQLAlchemy tries to keep these to a -minimum overall. diff --git a/doc/build/orm/cascades.rst b/doc/build/orm/cascades.rst index 466c1975cec..3c1180404c1 100644 --- a/doc/build/orm/cascades.rst +++ b/doc/build/orm/cascades.rst @@ -570,6 +570,8 @@ expunge from the :class:`.Session` using :meth:`.Session.expunge`, the operation should be propagated down to referred objects. +.. _back_populates_cascade: + .. _backref_cascade: Controlling Cascade on Backrefs diff --git a/doc/build/orm/relationships.rst b/doc/build/orm/relationships.rst index b9111741ccf..0c12ba1a4b3 100644 --- a/doc/build/orm/relationships.rst +++ b/doc/build/orm/relationships.rst @@ -14,9 +14,9 @@ of its usage. For an introduction to relationships, start with the basic_relationships self_referential - backref join_conditions collections relationship_persistence + backref relationship_api diff --git a/lib/sqlalchemy/orm/__init__.py b/lib/sqlalchemy/orm/__init__.py index 78650507ee6..6e0de05c6d3 100644 --- a/lib/sqlalchemy/orm/__init__.py +++ b/lib/sqlalchemy/orm/__init__.py @@ -172,18 +172,24 @@ def dynamic_loader(argument, **kw): def backref(name, **kwargs): - """Create a back reference with explicit keyword arguments, which are the - same arguments one can send to :func:`relationship`. + """When using the :paramref:`_orm.relationship.backref` parameter, + provides specific parameters to be used when the new + :func:`_orm.relationship` is generated. - Used with the ``backref`` keyword argument to :func:`relationship` in - place of a string argument, e.g.:: + E.g.:: 'items':relationship( SomeItem, backref=backref('parent', lazy='subquery')) + The :paramref:`_orm.relationship.backref` parameter is generally + considered to be legacy; for modern applications, using + explicit :func:`_orm.relationship` constructs linked together using + the :paramref:`_orm.relationship.back_populates` parameter should be + preferred. + .. 
seealso:: - :ref:`relationships_backref` + :ref:`relationships_backref` - background on backrefs """ diff --git a/lib/sqlalchemy/orm/relationships.py b/lib/sqlalchemy/orm/relationships.py index f58277e32ea..b51ea0e0097 100644 --- a/lib/sqlalchemy/orm/relationships.py +++ b/lib/sqlalchemy/orm/relationships.py @@ -311,41 +311,51 @@ class name or dotted package-qualified name. the "previous" value of the attribute. :param backref: - Indicates the string name of a property to be placed on the related - mapper's class that will handle this relationship in the other - direction. The other property will be created automatically - when the mappers are configured. Can also be passed as a - :func:`.backref` object to control the configuration of the - new relationship. + A reference to a string relationship name, or a :func:`_orm.backref` + construct, which will be used to automatically generate a new + :func:`_orm.relationship` on the related class, which then refers to + this one using a bi-directional + :paramref:`_orm.relationship.back_populates` configuration. + + In modern Python, explicit use of :func:`_orm.relationship` with + :paramref:`_orm.relationship.back_populates` should be preferred, as + it is more robust in terms of mapper configuration as well as more + conceptually straightforward. It also integrates with new :pep:`484` + typing features introduced in SQLAlchemy 2.0 which is not possible + with dynamically generated attributes. .. seealso:: - :ref:`relationships_backref` - Introductory documentation and - examples. + :ref:`relationships_backref` - notes on using + :paramref:`_orm.relationship.backref` - :paramref:`_orm.relationship.back_populates` - alternative form - of backref specification. 
+ :ref:`tutorial_orm_related_objects` - in the + :ref:`unified_tutorial`, presents an overview of bi-directional + relationship configuration and behaviors using + :paramref:`_orm.relationship.back_populates` - :func:`.backref` - allows control over :func:`_orm.relationship` - configuration when using :paramref:`_orm.relationship.backref`. + :func:`.backref` - allows control over :func:`_orm.relationship` + configuration when using :paramref:`_orm.relationship.backref`. :param back_populates: - Takes a string name and has the same meaning as - :paramref:`_orm.relationship.backref`, except the complementing - property is **not** created automatically, and instead must be - configured explicitly on the other mapper. The complementing - property should also indicate - :paramref:`_orm.relationship.back_populates` to this relationship to - ensure proper functioning. + Indicates the name of a :func:`_orm.relationship` on the related + class that will be synchronized with this one. It is usually + expected that the :func:`_orm.relationship` on the related class + also refer to this one. This allows objects on both sides of + each :func:`_orm.relationship` to synchronize in-Python state + changes and also provides directives to the :term:`unit of work` + flush process how changes along these relationships should + be persisted. .. seealso:: - :ref:`relationships_backref` - Introductory documentation and - examples. + :ref:`tutorial_orm_related_objects` - in the + :ref:`unified_tutorial`, presents an overview of bi-directional + relationship configuration and behaviors. - :paramref:`_orm.relationship.backref` - alternative form - of backref specification. + :ref:`relationship_patterns` - includes many examples of + :paramref:`_orm.relationship.back_populates`. 
:param overlaps: A string name or comma-delimited set of names of other relationships From 844365266aeb2582d775d019c48e7ffa6113c673 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 5 Jul 2022 21:05:18 -0400 Subject: [PATCH 302/632] generalize sql server check for id col to accommodate ORM cases Fixed issues that prevented the new usage patterns for using DML with ORM objects presented at :ref:`orm_dml_returning_objects` from working correctly with the SQL Server pyodbc dialect. Here we add a step to look in compile_state._dict_values more thoroughly for the keys we need to determine "identity insert" or not, and also add a new compiler variable dml_compile_state so that we can skip the ORM's compile_state if present. Fixes: #8210 Change-Id: Idbd76bb3eb075c647dc6c1cb78f7315c821e15f7 (cherry picked from commit 5806428800d2f1ac775156f90497a2fc3a644f35) --- doc/build/changelog/unreleased_14/8210.rst | 8 ++++ lib/sqlalchemy/dialects/mssql/base.py | 7 +--- lib/sqlalchemy/sql/compiler.py | 18 +++++++++ lib/sqlalchemy/sql/dml.py | 8 ++++ test/orm/test_update_delete.py | 44 +++++++++++++++++----- test/sql/test_insert_exec.py | 28 ++++++++++++-- 6 files changed, 95 insertions(+), 18 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8210.rst diff --git a/doc/build/changelog/unreleased_14/8210.rst b/doc/build/changelog/unreleased_14/8210.rst new file mode 100644 index 00000000000..f99d86194f5 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8210.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, mssql + :tickets: 8210 + + Fixed issues that prevented the new usage patterns for using DML with ORM + objects presented at :ref:`orm_dml_returning_objects` from working + correctly with the SQL Server pyodbc dialect. 
+ diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index 1658f27c70c..735cc3cff82 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -1651,15 +1651,12 @@ def pre_exec(self): ) if insert_has_identity: - compile_state = self.compiled.compile_state + compile_state = self.compiled.dml_compile_state self._enable_identity_insert = ( id_column.key in self.compiled_parameters[0] ) or ( compile_state._dict_parameters - and ( - id_column.key in compile_state._dict_parameters - or id_column in compile_state._dict_parameters - ) + and (id_column.key in compile_state._insert_col_keys) ) else: diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 477c199c175..667dd7d3de5 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -402,6 +402,18 @@ class Compiled(object): """ + dml_compile_state = None + """Optional :class:`.CompileState` assigned at the same point that + .isinsert, .isupdate, or .isdelete is assigned. + + This will normally be the same object as .compile_state, with the + exception of cases like the :class:`.ORMFromStatementCompileState` + object. + + .. 
versionadded:: 1.4.40 + + """ + cache_key = None _gen_time = None @@ -3838,6 +3850,8 @@ def visit_insert(self, insert_stmt, **kw): if toplevel: self.isinsert = True + if not self.dml_compile_state: + self.dml_compile_state = compile_state if not self.compile_state: self.compile_state = compile_state @@ -4008,6 +4022,8 @@ def visit_update(self, update_stmt, **kw): toplevel = not self.stack if toplevel: self.isupdate = True + if not self.dml_compile_state: + self.dml_compile_state = compile_state if not self.compile_state: self.compile_state = compile_state @@ -4134,6 +4150,8 @@ def visit_delete(self, delete_stmt, **kw): toplevel = not self.stack if toplevel: self.isdelete = True + if not self.dml_compile_state: + self.dml_compile_state = compile_state if not self.compile_state: self.compile_state = compile_state diff --git a/lib/sqlalchemy/sql/dml.py b/lib/sqlalchemy/sql/dml.py index dea5d6119df..4a343147c96 100644 --- a/lib/sqlalchemy/sql/dml.py +++ b/lib/sqlalchemy/sql/dml.py @@ -189,6 +189,14 @@ def __init__(self, statement, compiler, **kw): if statement._multi_values: self._process_multi_values(statement) + @util.memoized_property + def _insert_col_keys(self): + # this is also done in crud.py -> _key_getters_for_crud_column + return [ + coercions.expect_as_key(roles.DMLColumnRole, col) + for col in self._dict_parameters + ] + @CompileState.plugin_for("default", "update") class UpdateDMLState(DMLState): diff --git a/test/orm/test_update_delete.py b/test/orm/test_update_delete.py index 255d70f4142..4eabe2f6c49 100644 --- a/test/orm/test_update_delete.py +++ b/test/orm/test_update_delete.py @@ -2217,21 +2217,45 @@ def test_load_from_update(self, connection): [User(name="jack", age=52), User(name="jill", age=34)], ) - def test_load_from_insert(self, connection): + @testing.combinations( + ("single",), + ("multiple", testing.requires.multivalues_inserts), + argnames="params", + ) + def test_load_from_insert(self, connection, params): User = self.classes.User - stmt = 
( - insert(User) - .values({User.id: 5, User.age: 25, User.name: "spongebob"}) - .returning(User) - ) + if params == "multiple": + values = [ + {User.id: 5, User.age: 25, User.name: "spongebob"}, + {User.id: 6, User.age: 30, User.name: "patrick"}, + {User.id: 7, User.age: 35, User.name: "squidward"}, + ] + elif params == "single": + values = {User.id: 5, User.age: 25, User.name: "spongebob"} + else: + assert False + + stmt = insert(User).values(values).returning(User) stmt = select(User).from_statement(stmt) with Session(connection) as sess: rows = sess.execute(stmt).scalars().all() - eq_( - rows, - [User(name="spongebob", age=25)], - ) + if params == "multiple": + eq_( + rows, + [ + User(name="spongebob", age=25), + User(name="patrick", age=30), + User(name="squidward", age=35), + ], + ) + elif params == "single": + eq_( + rows, + [User(name="spongebob", age=25)], + ) + else: + assert False diff --git a/test/sql/test_insert_exec.py b/test/sql/test_insert_exec.py index 76b4ba01ea8..334df9575e9 100644 --- a/test/sql/test_insert_exec.py +++ b/test/sql/test_insert_exec.py @@ -20,6 +20,14 @@ from sqlalchemy.testing.schema import Table +class ExpectExpr: + def __init__(self, element): + self.element = element + + def __clause_element__(self): + return self.element + + class InsertExecTest(fixtures.TablesTest): __backend__ = True @@ -36,13 +44,27 @@ def define_tables(cls, metadata): ) @testing.requires.multivalues_inserts - def test_multivalues_insert(self, connection): + @testing.combinations("string", "column", "expect", argnames="keytype") + def test_multivalues_insert(self, connection, keytype): + users = self.tables.users + + if keytype == "string": + user_id, user_name = "user_id", "user_name" + elif keytype == "column": + user_id, user_name = users.c.user_id, users.c.user_name + elif keytype == "expect": + user_id, user_name = ExpectExpr(users.c.user_id), ExpectExpr( + users.c.user_name + ) + else: + assert False + connection.execute( users.insert().values( [ - 
{"user_id": 7, "user_name": "jack"}, - {"user_id": 8, "user_name": "ed"}, + {user_id: 7, user_name: "jack"}, + {user_id: 8, user_name: "ed"}, ] ) ) From d245a3deac6e5c1efbaf04caec4de8cb3ee44ad5 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 7 Jul 2022 11:44:09 -0400 Subject: [PATCH 303/632] document using fetch() with Oracle We implemented working FETCH support, but it's not yet implied by limit/offset. The docs make no mention that this is available which is very misleading including to maintainers. Make it clear that fetch() support is there right now, it's just not yet implicit with limit/offset. Change-Id: Ib2231dcdd80a8bf3ac4bbf590e1a8dfeac31e9da References: #8221 (cherry picked from commit 805a1323b973a30af99ce506dd5c5c4ab96cff0f) --- lib/sqlalchemy/dialects/oracle/base.py | 52 ++++++++++++++++---------- 1 file changed, 32 insertions(+), 20 deletions(-) diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py index 49ee47959ab..77f0dbd2df6 100644 --- a/lib/sqlalchemy/dialects/oracle/base.py +++ b/lib/sqlalchemy/dialects/oracle/base.py @@ -255,36 +255,48 @@ version of Oracle server (compatibility version < 12.2) is detected. -LIMIT/OFFSET Support --------------------- +LIMIT/OFFSET/FETCH Support +-------------------------- -Oracle has no direct support for LIMIT and OFFSET until version 12c. -To achieve this behavior across all widely used versions of Oracle starting -with the 8 series, SQLAlchemy currently makes use of ROWNUM to achieve -LIMIT/OFFSET; the exact methodology is taken from -https://blogs.oracle.com/oraclemagazine/on-rownum-and-limiting-results . +Methods like :meth:`_sql.Select.limit` and :meth:`_sql.Select.offset` currently +use an emulated approach for LIMIT / OFFSET based on window functions, which +involves creation of a subquery using ``ROW_NUMBER`` that is prone to +performance issues as well as SQL construction issues for complex statements. 
+However, this approach is supported by all Oracle versions. See notes below. -There is currently a single option to affect its behavior: +When using Oracle 12c and above, use the :meth:`_sql.Select.fetch` method +instead; this will render the more modern +``FETCH FIRST N ROW / OFFSET N ROWS`` syntax. + +Notes on LIMIT / OFFSET emulation (when fetch() method cannot be used) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If using :meth:`_sql.Select.limit` and :meth:`_sql.Select.offset`, +or with the ORM the :meth:`_orm.Query.limit` and :meth:`_orm.Query.offset` methods, +and the :meth:`_sql.Select.fetch` method **cannot** be used instead, the following +notes apply: + +* SQLAlchemy currently makes use of ROWNUM to achieve + LIMIT/OFFSET; the exact methodology is taken from + https://blogs.oracle.com/oraclemagazine/on-rownum-and-limiting-results . * the "FIRST_ROWS()" optimization keyword is not used by default. To enable the usage of this optimization directive, specify ``optimize_limits=True`` to :func:`_sa.create_engine`. -.. versionchanged:: 1.4 - The Oracle dialect renders limit/offset integer values using a "post - compile" scheme which renders the integer directly before passing the - statement to the cursor for execution. The ``use_binds_for_limits`` flag - no longer has an effect. - - .. seealso:: + .. versionchanged:: 1.4 + The Oracle dialect renders limit/offset integer values using a "post + compile" scheme which renders the integer directly before passing the + statement to the cursor for execution. The ``use_binds_for_limits`` flag + no longer has an effect. - :ref:`change_4808`. + .. seealso:: -Support for changing the row number strategy, which would include one that -makes use of the ``row_number()`` window function as well as one that makes -use of the Oracle 12c "FETCH FIRST N ROW / OFFSET N ROWS" keywords may be -added in a future release. + :ref:`change_4808`. 
+* A future release may use ``FETCH FIRST N ROW / OFFSET N ROWS`` automatically + when :meth:`_sql.Select.limit`, :meth:`_sql.Select.offset`, :meth:`_orm.Query.limit`, + :meth:`_orm.Query.offset` are used. .. _oracle_returning: From 1b18740b99a02df5c200ce59c4f81dcf999fda2b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 10 Jul 2022 21:24:17 -0400 Subject: [PATCH 304/632] support "SELECT *" for ORM queries A :func:`_sql.select` construct that is passed a sole '*' argument for ``SELECT *``, either via string, :func:`_sql.text`, or :func:`_sql.literal_column`, will be interpreted as a Core-level SQL statement rather than as an ORM level statement. This is so that the ``*``, when expanded to match any number of columns, will result in all columns returned in the result. the ORM- level interpretation of :func:`_sql.select` needs to know the names and types of all ORM columns up front which can't be achieved when ``'*'`` is used. If ``'*`` is used amongst other expressions simultaneously with an ORM statement, an error is raised as this can't be interpreted correctly by the ORM. Fixes: #8235 Change-Id: Ic8e84491e14acdc8570704eadeaeaf6e16b1e870 (cherry picked from commit 3916bfc9ccf2904f69498075849a82ceee225b3a) --- doc/build/changelog/unreleased_14/8235.rst | 16 ++++ lib/sqlalchemy/orm/context.py | 12 +++ lib/sqlalchemy/sql/elements.py | 9 +++ test/orm/test_loading.py | 88 ++++++++++++++++++++++ 4 files changed, 125 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/8235.rst diff --git a/doc/build/changelog/unreleased_14/8235.rst b/doc/build/changelog/unreleased_14/8235.rst new file mode 100644 index 00000000000..ea5726e10d9 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8235.rst @@ -0,0 +1,16 @@ +.. 
change::
+    :tags: bug, orm
+    :tickets: 8235
+
+    A :func:`_sql.select` construct that is passed a sole '*' argument for
+    ``SELECT *``, either via string, :func:`_sql.text`, or
+    :func:`_sql.literal_column`, will be interpreted as a Core-level SQL
+    statement rather than as an ORM level statement. This is so that the ``*``,
+    when expanded to match any number of columns, will result in all columns
+    returned in the result. The ORM-level interpretation of
+    :func:`_sql.select` needs to know the names and types of all ORM columns up
+    front which can't be achieved when ``'*'`` is used.
+
+    If ``'*'`` is used amongst other expressions simultaneously with an ORM
+    statement, an error is raised as this can't be interpreted correctly by the
+    ORM.
diff --git a/lib/sqlalchemy/orm/context.py b/lib/sqlalchemy/orm/context.py
index 7cedc2b43cb..9d4f652ea4f 100644
--- a/lib/sqlalchemy/orm/context.py
+++ b/lib/sqlalchemy/orm/context.py
@@ -178,6 +178,7 @@ class default_compile_options(CacheableOptions):
         ("_set_base_alias", InternalTraversal.dp_boolean),
         ("_for_refresh_state", InternalTraversal.dp_boolean),
         ("_render_for_subquery", InternalTraversal.dp_boolean),
+        ("_is_star", InternalTraversal.dp_boolean),
     ]
 
     # set to True by default from Query._statement_20(), to indicate
@@ -202,6 +203,7 @@ class default_compile_options(CacheableOptions):
     _set_base_alias = False
     _for_refresh_state = False
     _render_for_subquery = False
+    _is_star = False
 
     current_path = _path_registry
 
@@ -336,6 +338,8 @@ def orm_setup_cursor_result(
     load_options = execution_options.get(
         "_sa_orm_load_options", QueryContext.default_load_options
     )
+    if compile_state.compile_options._is_star:
+        return result
 
     querycontext = QueryContext(
         compile_state,
@@ -860,6 +864,11 @@ def _setup_for_generate(self):
 
         self._for_update_arg = query._for_update_arg
 
+        if self.compile_options._is_star and (len(self._entities) != 1):
+            raise sa_exc.CompileError(
+                "Can't generate ORM query that includes multiple expressions "
+                "at the 
same time as '*'; query for '*' alone if present" + ) for entity in self._entities: entity.setup_compile_state(self) @@ -2941,6 +2950,9 @@ def __init__( self.raw_column_index = raw_column_index self.translate_raw_column = raw_column_index is not None + if column._is_star: + compile_state.compile_options += {"_is_star": True} + if not is_current_entities or column._is_text_clause: self._label_name = None else: diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index a1891f19cab..c9cea23dadd 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -216,6 +216,7 @@ class ClauseElement( _is_lambda_element = False _is_singleton_constant = False _is_immutable = False + _is_star = False _order_by_label_element = None @@ -1803,6 +1804,10 @@ def _select_iterable(self): _allow_label_resolve = False + @property + def _is_star(self): + return self.text == "*" + def __init__(self, text, bind=None): self._bind = bind self._bindparams = {} @@ -4795,6 +4800,10 @@ class is usable by itself in those cases where behavioral requirements _is_multiparam_column = False + @property + def _is_star(self): + return self.is_literal and self.name == "*" + def __init__(self, text, type_=None, is_literal=False, _selectable=None): """Produce a :class:`.ColumnClause` object. 
diff --git a/test/orm/test_loading.py b/test/orm/test_loading.py index 88a160b5a83..cc3c3f49424 100644 --- a/test/orm/test_loading.py +++ b/test/orm/test_loading.py @@ -1,12 +1,16 @@ from sqlalchemy import exc +from sqlalchemy import literal +from sqlalchemy import literal_column from sqlalchemy import select from sqlalchemy import testing +from sqlalchemy import text from sqlalchemy.orm import loading from sqlalchemy.orm import relationship from sqlalchemy.testing import mock from sqlalchemy.testing.assertions import assert_raises from sqlalchemy.testing.assertions import assert_raises_message from sqlalchemy.testing.assertions import eq_ +from sqlalchemy.testing.assertions import expect_raises_message from sqlalchemy.testing.fixtures import fixture_session from . import _fixtures @@ -14,6 +18,90 @@ # class LoadOnIdentTest(_fixtures.FixtureTest): +class SelectStarTest(_fixtures.FixtureTest): + run_setup_mappers = "once" + run_inserts = "once" + run_deletes = None + + @classmethod + def setup_mappers(cls): + cls._setup_stock_mapping() + + @testing.combinations( + "plain", "text", "literal_column", argnames="exprtype" + ) + @testing.combinations("core", "orm", argnames="coreorm") + def test_single_star(self, exprtype, coreorm): + """test for #8235""" + User, Address = self.classes("User", "Address") + + if exprtype == "plain": + star = "*" + elif exprtype == "text": + star = text("*") + elif exprtype == "literal_column": + star = literal_column("*") + else: + assert False + + stmt = ( + select(star) + .select_from(User) + .join(Address) + .where(User.id == 7) + .order_by(User.id, Address.id) + ) + + s = fixture_session() + + if coreorm == "core": + result = s.connection().execute(stmt) + elif coreorm == "orm": + result = s.execute(stmt) + else: + assert False + + eq_(result.all(), [(7, "jack", 1, 7, "jack@bean.com")]) + + @testing.combinations( + "plain", "text", "literal_column", argnames="exprtype" + ) + @testing.combinations( + lambda User, star: (star, User.id), 
+ lambda User, star: (star, User), + lambda User, star: (User.id, star), + lambda User, star: (User, star), + lambda User, star: (literal("some text"), star), + lambda User, star: (star, star), + lambda User, star: (star, text("some text")), + argnames="testcase", + ) + def test_no_star_orm_combinations(self, exprtype, testcase): + """test for #8235""" + User = self.classes.User + + if exprtype == "plain": + star = "*" + elif exprtype == "text": + star = text("*") + elif exprtype == "literal_column": + star = literal_column("*") + else: + assert False + + args = testing.resolve_lambda(testcase, User=User, star=star) + stmt = select(*args).select_from(User) + + s = fixture_session() + + with expect_raises_message( + exc.CompileError, + r"Can't generate ORM query that includes multiple expressions " + r"at the same time as '\*';", + ): + s.execute(stmt) + + class InstanceProcessorTest(_fixtures.FixtureTest): def test_state_no_load_path_comparison(self): # test issue #5110 From 25090bd7d18e4750e1a835de0f0b863fe08daf52 Mon Sep 17 00:00:00 2001 From: Gord Thompson Date: Fri, 8 Jul 2022 11:50:54 -0600 Subject: [PATCH 305/632] Update docs for association_proxy Make naming more consistent and explicit Change-Id: If90de9ab8b10348d6d1547b9fd6e3b2c312d9ee8 (cherry picked from commit 9f295d1e9ce9ffbdd93e6d9e2537fd9a353c1a2f) --- doc/build/orm/extensions/associationproxy.rst | 188 +++++++++++------- 1 file changed, 120 insertions(+), 68 deletions(-) diff --git a/doc/build/orm/extensions/associationproxy.rst b/doc/build/orm/extensions/associationproxy.rst index 8e2b63910e2..6c7bfcee075 100644 --- a/doc/build/orm/extensions/associationproxy.rst +++ b/doc/build/orm/extensions/associationproxy.rst @@ -33,7 +33,7 @@ Each ``User`` can have any number of ``Keyword`` objects, and vice-versa __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String(64)) - kw = relationship("Keyword", secondary=lambda: userkeywords_table) + kw = relationship("Keyword", 
secondary=lambda: user_keyword_table) def __init__(self, name): self.name = name @@ -48,8 +48,8 @@ Each ``User`` can have any number of ``Keyword`` objects, and vice-versa self.keyword = keyword - userkeywords_table = Table( - "userkeywords", + user_keyword_table = Table( + "user_keyword", Base.metadata, Column("user_id", Integer, ForeignKey("user.id"), primary_key=True), Column("keyword_id", Integer, ForeignKey("keyword.id"), primary_key=True), @@ -79,7 +79,7 @@ value of ``.keyword`` associated with each ``Keyword`` object:: __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String(64)) - kw = relationship("Keyword", secondary=lambda: userkeywords_table) + kw = relationship("Keyword", secondary=lambda: user_keyword_table) def __init__(self, name): self.name = name @@ -158,48 +158,51 @@ relationship, and is described at :ref:`association_pattern`. Association proxies are useful for keeping "association objects" out of the way during regular use. -Suppose our ``userkeywords`` table above had additional columns +Suppose our ``user_keyword`` table above had additional columns which we'd like to map explicitly, but in most cases we don't require direct access to these attributes. Below, we illustrate -a new mapping which introduces the ``UserKeyword`` class, which -is mapped to the ``userkeywords`` table illustrated earlier. +a new mapping which introduces the ``UserKeywordAssociation`` class, which +is mapped to the ``user_keyword`` table illustrated earlier. This class adds an additional column ``special_key``, a value which we occasionally want to access, but not in the usual case. 
We create an association proxy on the ``User`` class called -``keywords``, which will bridge the gap from the ``user_keywords`` +``keywords``, which will bridge the gap from the ``user_keyword_associations`` collection of ``User`` to the ``.keyword`` attribute present on each -``UserKeyword``:: +``UserKeywordAssociation``:: from sqlalchemy import Column, ForeignKey, Integer, String from sqlalchemy.ext.associationproxy import association_proxy - from sqlalchemy.orm import backref, declarative_base, relationship + from sqlalchemy.orm import declarative_base, relationship Base = declarative_base() class User(Base): __tablename__ = "user" + id = Column(Integer, primary_key=True) name = Column(String(64)) - # association proxy of "user_keywords" collection + user_keyword_associations = relationship( + "UserKeywordAssociation", + back_populates="user", + cascade="all, delete-orphan", + ) + # association proxy of "user_keyword_associations" collection # to "keyword" attribute - keywords = association_proxy("user_keywords", "keyword") + keywords = association_proxy("user_keyword_associations", "keyword") def __init__(self, name): self.name = name - class UserKeyword(Base): + class UserKeywordAssociation(Base): __tablename__ = "user_keyword" user_id = Column(Integer, ForeignKey("user.id"), primary_key=True) keyword_id = Column(Integer, ForeignKey("keyword.id"), primary_key=True) special_key = Column(String(50)) - # bidirectional attribute/collection of "user"/"user_keywords" - user = relationship( - User, backref=backref("user_keywords", cascade="all, delete-orphan") - ) + user = relationship(User, back_populates="user_keyword_associations") # reference to the "Keyword" object keyword = relationship("Keyword") @@ -223,7 +226,7 @@ collection of ``User`` to the ``.keyword`` attribute present on each With the above configuration, we can operate upon the ``.keywords`` collection of each ``User`` object, each of which exposes a collection of ``Keyword`` -objects that are 
obtained from the underyling ``UserKeyword`` elements:: +objects that are obtained from the underlying ``UserKeywordAssociation`` elements:: >>> user = User("log") @@ -238,29 +241,29 @@ This example is in contrast to the example illustrated previously at a collection of strings, rather than a collection of composed objects. In this case, each ``.keywords.append()`` operation is equivalent to:: - >>> user.user_keywords.append(UserKeyword(Keyword("its_heavy"))) + >>> user.user_keyword_associations.append(UserKeywordAssociation(Keyword("its_heavy"))) -The ``UserKeyword`` association object has two attributes that are both +The ``UserKeywordAssociation`` object has two attributes that are both populated within the scope of the ``append()`` operation of the association proxy; ``.keyword``, which refers to the ``Keyword` object, and ``.user``, which refers to the ``User``. The ``.keyword`` attribute is populated first, as the association proxy -generates a new ``UserKeyword`` object in response to the ``.append()`` +generates a new ``UserKeywordAssociation`` object in response to the ``.append()`` operation, assigning the given ``Keyword`` instance to the ``.keyword`` -attribute. Then, as the ``UserKeyword`` object is appended to the -``User.user_keywords`` collection, the ``UserKeyword.user`` attribute, -configured as ``back_populates`` for ``User.user_keywords``, is initialized -upon the given ``UserKeyword`` instance to refer to the parent ``User`` +attribute. Then, as the ``UserKeywordAssociation`` object is appended to the +``User.user_keyword_associations`` collection, the ``UserKeywordAssociation.user`` attribute, +configured as ``back_populates`` for ``User.user_keyword_associations``, is initialized +upon the given ``UserKeywordAssociation`` instance to refer to the parent ``User`` receiving the append operation. The ``special_key`` argument above is left at its default value of ``None``. 
For those cases where we do want ``special_key`` to have a value, we -create the ``UserKeyword`` object explicitly. Below we assign all +create the ``UserKeywordAssociation`` object explicitly. Below we assign all three attributes, wherein the assignment of ``.user`` during -construction, has the effect of appending the new ``UserKeyword`` to -the ``User.user_keywords`` collection (via the relationship):: +construction, has the effect of appending the new ``UserKeywordAssociation`` to +the ``User.user_keyword_associations`` collection (via the relationship):: - >>> UserKeyword(Keyword("its_wood"), user, special_key="my special key") + >>> UserKeywordAssociation(Keyword("its_wood"), user, special_key="my special key") The association proxy returns to us a collection of ``Keyword`` objects represented by all these operations:: @@ -285,15 +288,15 @@ arguments to the creation function instead of one, the key and the value. As always, this creation function defaults to the constructor of the intermediary class, and can be customized using the ``creator`` argument. -Below, we modify our ``UserKeyword`` example such that the ``User.user_keywords`` -collection will now be mapped using a dictionary, where the ``UserKeyword.special_key`` -argument will be used as the key for the dictionary. We then apply a ``creator`` +Below, we modify our ``UserKeywordAssociation`` example such that the ``User.user_keyword_associations`` +collection will now be mapped using a dictionary, where the ``UserKeywordAssociation.special_key`` +argument will be used as the key for the dictionary. 
We also apply a ``creator`` argument to the ``User.keywords`` proxy so that these values are assigned appropriately when new elements are added to the dictionary:: from sqlalchemy import Column, ForeignKey, Integer, String from sqlalchemy.ext.associationproxy import association_proxy - from sqlalchemy.orm import backref, declarative_base, relationship + from sqlalchemy.orm import declarative_base, relationship from sqlalchemy.orm.collections import attribute_mapped_collection Base = declarative_base() @@ -304,34 +307,36 @@ when new elements are added to the dictionary:: id = Column(Integer, primary_key=True) name = Column(String(64)) - # proxy to 'user_keywords', instantiating UserKeyword - # assigning the new key to 'special_key', values to - # 'keyword'. + # user/user_keyword_associations relationship, mapping + # user_keyword_associations with a dictionary against "special_key" as key. + user_keyword_associations = relationship( + "UserKeywordAssociation", + back_populates="user", + collection_class=attribute_mapped_collection("special_key"), + cascade="all, delete-orphan", + ) + # proxy to 'user_keyword_associations', instantiating + # UserKeywordAssociation assigning the new key to 'special_key', + # values to 'keyword'. keywords = association_proxy( - "user_keywords", + "user_keyword_associations", "keyword", - creator=lambda k, v: UserKeyword(special_key=k, keyword=v), + creator=lambda k, v: UserKeywordAssociation(special_key=k, keyword=v), ) def __init__(self, name): self.name = name - class UserKeyword(Base): + class UserKeywordAssociation(Base): __tablename__ = "user_keyword" user_id = Column(Integer, ForeignKey("user.id"), primary_key=True) keyword_id = Column(Integer, ForeignKey("keyword.id"), primary_key=True) special_key = Column(String) - # bidirectional user/user_keywords relationships, mapping - # user_keywords with a dictionary against "special_key" as key. 
user = relationship( User, - backref=backref( - "user_keywords", - collection_class=attribute_mapped_collection("special_key"), - cascade="all, delete-orphan", - ), + back_populates="user_keyword_associations", ) keyword = relationship("Keyword") @@ -348,7 +353,7 @@ when new elements are added to the dictionary:: return "Keyword(%s)" % repr(self.keyword) We illustrate the ``.keywords`` collection as a dictionary, mapping the -``UserKeyword.special_key`` value to ``Keyword`` objects:: +``UserKeywordAssociation.special_key`` value to ``Keyword`` objects:: >>> user = User("log") @@ -367,14 +372,14 @@ Given our previous examples of proxying from relationship to scalar attribute, proxying across an association object, and proxying dictionaries, we can combine all three techniques together to give ``User`` a ``keywords`` dictionary that deals strictly with the string value -of ``special_key`` mapped to the string ``keyword``. Both the ``UserKeyword`` +of ``special_key`` mapped to the string ``keyword``. Both the ``UserKeywordAssociation`` and ``Keyword`` classes are entirely concealed. 
This is achieved by building an association proxy on ``User`` that refers to an association proxy -present on ``UserKeyword``:: +present on ``UserKeywordAssociation``:: from sqlalchemy import Column, ForeignKey, Integer, String from sqlalchemy.ext.associationproxy import association_proxy - from sqlalchemy.orm import backref, declarative_base, relationship + from sqlalchemy.orm import declarative_base, relationship from sqlalchemy.orm.collections import attribute_mapped_collection Base = declarative_base() @@ -385,33 +390,32 @@ present on ``UserKeyword``:: id = Column(Integer, primary_key=True) name = Column(String(64)) - # the same 'user_keywords'->'keyword' proxy as in + user_keyword_associations = relationship( + "UserKeywordAssociation", + back_populates="user", + collection_class=attribute_mapped_collection("special_key"), + cascade="all, delete-orphan", + ) + # the same 'user_keyword_associations'->'keyword' proxy as in # the basic dictionary example. keywords = association_proxy( - "user_keywords", + "user_keyword_associations", "keyword", - creator=lambda k, v: UserKeyword(special_key=k, keyword=v), + creator=lambda k, v: UserKeywordAssociation(special_key=k, keyword=v), ) - # another proxy that is directly column-targeted - special_keys = association_proxy("user_keywords", "special_key") - def __init__(self, name): self.name = name - class UserKeyword(Base): + class UserKeywordAssociation(Base): __tablename__ = "user_keyword" user_id = Column(ForeignKey("user.id"), primary_key=True) keyword_id = Column(ForeignKey("keyword.id"), primary_key=True) special_key = Column(String) user = relationship( User, - backref=backref( - "user_keywords", - collection_class=attribute_mapped_collection("special_key"), - cascade="all, delete-orphan", - ), + back_populates="user_keyword_associations", ) # the relationship to Keyword is now called @@ -432,7 +436,7 @@ present on ``UserKeyword``:: self.keyword = keyword ``User.keywords`` is now a dictionary of string to string, 
where -``UserKeyword`` and ``Keyword`` objects are created and removed for us +``UserKeywordAssociation`` and ``Keyword`` objects are created and removed for us transparently using the association proxy. In the example below, we illustrate usage of the assignment operator, also appropriately handled by the association proxy, to apply a dictionary value to the collection at once:: @@ -451,7 +455,7 @@ association proxy, to apply a dictionary value to the collection at once:: {'sk1': 'kw1', 'sk3': 'kw3'} >>> # illustrate un-proxied usage - ... print(user.user_keywords['sk3'].kw) + ... print(user.user_keyword_associations['sk3'].kw) <__main__.Keyword object at 0x12ceb90> One caveat with our example above is that because ``Keyword`` objects are created @@ -468,10 +472,10 @@ Querying with Association Proxies --------------------------------- The :class:`.AssociationProxy` features simple SQL construction capabilities -which work at the class level in a similar way as other ORM-mapped attributes. -Class-bound attributes such as ``User.keywords`` and ``User.special_keys`` -in the preceding example will provide for a SQL generating construct -when accessed at the class level. +which work at the class level in a similar way as other ORM-mapped attributes, +and provide rudimentary filtering support primarily based on the +SQL ``EXISTS`` keyword. + .. note:: The primary purpose of the association proxy extension is to allow for improved persistence and object-access patterns with mapped object @@ -480,6 +484,54 @@ when accessed at the class level. attributes when constructing SQL queries with JOINs, eager loading options, etc. 
+For this section, assume a class with both an association proxy +that refers to a column, as well as an association proxy that refers +to a related object, as in the example mapping below:: + + from sqlalchemy import Column, ForeignKey, Integer, String + from sqlalchemy.ext.associationproxy import association_proxy + from sqlalchemy.orm import declarative_base, relationship + from sqlalchemy.orm.collections import attribute_mapped_collection + + Base = declarative_base() + + + class User(Base): + __tablename__ = "user" + id = Column(Integer, primary_key=True) + name = Column(String(64)) + + user_keyword_associations = relationship( + "UserKeywordAssociation", + cascade="all, delete-orphan", + ) + + # object-targeted association proxy + keywords = association_proxy( + "user_keyword_associations", + "keyword", + ) + + # column-targeted association proxy + special_keys = association_proxy( + "user_keyword_associations", "special_key" + ) + + + class UserKeywordAssociation(Base): + __tablename__ = "user_keyword" + user_id = Column(ForeignKey("user.id"), primary_key=True) + keyword_id = Column(ForeignKey("keyword.id"), primary_key=True) + special_key = Column(String) + keyword = relationship("Keyword") + + + class Keyword(Base): + __tablename__ = "keyword" + id = Column(Integer, primary_key=True) + keyword = Column("keyword", String(64)) + + The SQL generated takes the form of a correlated subquery against the EXISTS SQL operator so that it can be used in a WHERE clause without the need for additional modifications to the enclosing query. If the From 899f5d05363f144f60cfbc30717df0d673e1d51f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 13 Jul 2022 10:54:52 -0400 Subject: [PATCH 306/632] link to main isolation level document this was already in 2.0. 
a few more corrections coming Change-Id: Id94eee9081fd5174bad2275f544010e7cb467454 references: #8252 --- lib/sqlalchemy/engine/create.py | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/lib/sqlalchemy/engine/create.py b/lib/sqlalchemy/engine/create.py index 7816a301061..b9886b701b7 100644 --- a/lib/sqlalchemy/engine/create.py +++ b/lib/sqlalchemy/engine/create.py @@ -289,19 +289,7 @@ def create_engine(url, **kwargs): .. seealso:: - :attr:`_engine.Connection.default_isolation_level` - - view default level - - :paramref:`.Connection.execution_options.isolation_level` - - set per :class:`_engine.Connection` isolation level - - :ref:`SQLite Transaction Isolation ` - - :ref:`PostgreSQL Transaction Isolation ` - - :ref:`MySQL Transaction Isolation ` - - :ref:`session_transaction_isolation` - for the ORM + :ref:`dbapi_autocommit` :param json_deserializer: for dialects that support the :class:`_types.JSON` From 9f42c3aeabf299183c576f5c01b6376685ea80f1 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 13 Jul 2022 11:02:37 -0400 Subject: [PATCH 307/632] document create_engine.isolation_level for PG Change-Id: I06eaede9e021eb0790929168e9bedb0c8b58140a References: #8252 (cherry picked from commit a3a3299b5b9728a432ba900754047844ecfb98d6) --- lib/sqlalchemy/dialects/mssql/base.py | 5 ++++- lib/sqlalchemy/dialects/mysql/base.py | 5 +++++ lib/sqlalchemy/dialects/postgresql/base.py | 15 +++++++++------ 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index 735cc3cff82..3c22b9b7c96 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -433,7 +433,10 @@ class TestTable(Base): * ``SERIALIZABLE`` * ``SNAPSHOT`` - specific to SQL Server -.. 
versionadded:: 1.2 added AUTOCOMMIT isolation level setting +There are also more options for isolation level configurations, such as +"sub-engine" objects linked to a main :class:`.Engine` which each apply +different isolation level settings. See the discussion at +:ref:`dbapi_autocommit` for background. .. seealso:: diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index 26af6eb799f..95e9cd1b37d 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -238,6 +238,11 @@ the database connection will return true for the value of ``SELECT @@autocommit;``. +There are also more options for isolation level configurations, such as +"sub-engine" objects linked to a main :class:`.Engine` which each apply +different isolation level settings. See the discussion at +:ref:`dbapi_autocommit` for background. + .. seealso:: :ref:`dbapi_autocommit` diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index dbaced5db53..23d4a49feeb 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -148,7 +148,7 @@ def use_identity(element, compiler, **kw): --------------------------- Most SQLAlchemy dialects support setting of transaction isolation level -using the :paramref:`_sa.create_engine.execution_options` parameter +using the :paramref:`_sa.create_engine.isolation_level` parameter at the :func:`_sa.create_engine` level, and at the :class:`_engine.Connection` level via the :paramref:`.Connection.execution_options.isolation_level` parameter. 
@@ -166,9 +166,7 @@ def use_identity(element, compiler, **kw): engine = create_engine( "postgresql+pg8000://scott:tiger@localhost/test", - execution_options={ - "isolation_level": "REPEATABLE READ" - } + isolation_level = "REPEATABLE READ" ) To set using per-connection execution options:: @@ -180,6 +178,11 @@ def use_identity(element, compiler, **kw): with conn.begin(): # ... work with transaction +There are also more options for isolation level configurations, such as +"sub-engine" objects linked to a main :class:`.Engine` which each apply +different isolation level settings. See the discussion at +:ref:`dbapi_autocommit` for background. + Valid values for ``isolation_level`` on most PostgreSQL dialects include: * ``READ COMMITTED`` @@ -190,10 +193,10 @@ def use_identity(element, compiler, **kw): .. seealso:: - :ref:`postgresql_readonly_deferrable` - :ref:`dbapi_autocommit` + :ref:`postgresql_readonly_deferrable` + :ref:`psycopg2_isolation_level` :ref:`pg8000_isolation_level` From c12b4f991c5ab5bf9e1ba52261f7bedfd26293c8 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 15 Jul 2022 08:55:08 -0400 Subject: [PATCH 308/632] step five Change-Id: Ib6242d676c800b4b679aaf7f33f641ebcaed5b33 (cherry picked from commit a0597341195ba7445ef1e9c69092e3bd29427aec) --- doc/build/changelog/migration_20.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/changelog/migration_20.rst b/doc/build/changelog/migration_20.rst index 64bead3cd1b..626574cc061 100644 --- a/doc/build/changelog/migration_20.rst +++ b/doc/build/changelog/migration_20.rst @@ -327,7 +327,7 @@ The new engine is described at :class:`_future.Engine` which delivers a new -Migration to 2.0 Step Four - Use the ``future`` flag on Session +Migration to 2.0 Step Five - Use the ``future`` flag on Session --------------------------------------------------------------- The :class:`_orm.Session` object also features an updated transaction/connection From abe993344f18df3aab0f898b6a357e947d23416e 
Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 15 Jul 2022 12:53:37 -0400 Subject: [PATCH 309/632] remove needlessly complex assoc proxy mixin example this is some very exotic example that doesn't really explain anything new about mixins and only serves to make the docs less accessible. Change-Id: Ic51a12de3358f3a451bd7cf3542b375569499fc1 (cherry picked from commit 68d882387978d60dd354ba067de85ea298940376) --- doc/build/orm/declarative_mixins.rst | 86 ---------------------------- 1 file changed, 86 deletions(-) diff --git a/doc/build/orm/declarative_mixins.rst b/doc/build/orm/declarative_mixins.rst index 1221616d504..21345ccdc9c 100644 --- a/doc/build/orm/declarative_mixins.rst +++ b/doc/build/orm/declarative_mixins.rst @@ -329,92 +329,6 @@ the :class:`_orm.declared_attr` is invoked:: so that :class:`_orm.declared_attr` methods can access the actual column that will be mapped. -Mixing in Association Proxy and Other Attributes -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Mixins can specify user-defined attributes as well as other extension -units such as :func:`.association_proxy`. The usage of -:class:`_orm.declared_attr` is required in those cases where the attribute must -be tailored specifically to the target subclass. An example is when -constructing multiple :func:`.association_proxy` attributes which each -target a different type of child object. 
Below is an -:func:`.association_proxy` mixin example which provides a scalar list of -string values to an implementing class:: - - from sqlalchemy import Column, ForeignKey, Integer, String - from sqlalchemy.ext.associationproxy import association_proxy - from sqlalchemy.orm import ( - declarative_base, - declarative_mixin, - declared_attr, - relationship, - ) - - Base = declarative_base() - - - @declarative_mixin - class HasStringCollection: - @declared_attr - def _strings(cls): - class StringAttribute(Base): - __tablename__ = cls.string_table_name - id = Column(Integer, primary_key=True) - value = Column(String(50), nullable=False) - parent_id = Column( - Integer, - ForeignKey(f"{cls.__tablename__}.id"), - nullable=False, - ) - - def __init__(self, value): - self.value = value - - return relationship(StringAttribute) - - @declared_attr - def strings(cls): - return association_proxy("_strings", "value") - - - class TypeA(HasStringCollection, Base): - __tablename__ = "type_a" - string_table_name = "type_a_strings" - id = Column(Integer(), primary_key=True) - - - class TypeB(HasStringCollection, Base): - __tablename__ = "type_b" - string_table_name = "type_b_strings" - id = Column(Integer(), primary_key=True) - -Above, the ``HasStringCollection`` mixin produces a :func:`_orm.relationship` -which refers to a newly generated class called ``StringAttribute``. The -``StringAttribute`` class is generated with its own :class:`_schema.Table` -definition which is local to the parent class making usage of the -``HasStringCollection`` mixin. It also produces an :func:`.association_proxy` -object which proxies references to the ``strings`` attribute onto the ``value`` -attribute of each ``StringAttribute`` instance. 
- -``TypeA`` or ``TypeB`` can be instantiated given the constructor -argument ``strings``, a list of strings:: - - ta = TypeA(strings=["foo", "bar"]) - tb = TypeB(strings=["bat", "bar"]) - -This list will generate a collection -of ``StringAttribute`` objects, which are persisted into a table that's -local to either the ``type_a_strings`` or ``type_b_strings`` table:: - - >>> print(ta._strings) - [<__main__.StringAttribute object at 0x10151cd90>, - <__main__.StringAttribute object at 0x10151ce10>] - -When constructing the :func:`.association_proxy`, the -:class:`_orm.declared_attr` decorator must be used so that a distinct -:func:`.association_proxy` object is created for each of the ``TypeA`` -and ``TypeB`` classes. - .. _decl_mixin_inheritance: Controlling table inheritance with mixins From 0052827c44e9124e89cb3ba5b922b786fe333c9e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 17 Jul 2022 11:32:27 -0400 Subject: [PATCH 310/632] use concat() directly for contains, startswith, endswith Adjusted the SQL compilation for string containment functions ``.contains()``, ``.startswith()``, ``.endswith()`` to force the use of the string concatenation operator, rather than relying upon the overload of the addition operator, so that non-standard use of these operators with for example bytestrings still produces string concatenation operators. To accommodate this, needed to add a new _rconcat operator function, which is private, as well as a fallback in concat_op() that works similarly to Python builtin ops. 
Fixes: #8253 Change-Id: I2b7f56492f765742d88cb2a7834ded6a2892bd7e (cherry picked from commit 85a88df13ab8d217331cf98392544a888b4d7df3) --- doc/build/changelog/unreleased_14/8253.rst | 10 +++ lib/sqlalchemy/sql/compiler.py | 12 ++-- lib/sqlalchemy/sql/operators.py | 17 ++++- test/sql/test_operators.py | 82 ++++++++++++++++++++++ 4 files changed, 114 insertions(+), 7 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8253.rst diff --git a/doc/build/changelog/unreleased_14/8253.rst b/doc/build/changelog/unreleased_14/8253.rst new file mode 100644 index 00000000000..7496ae9fb0c --- /dev/null +++ b/doc/build/changelog/unreleased_14/8253.rst @@ -0,0 +1,10 @@ +.. change:: + :tags: bug, sql + :tickets: 8253 + + Adjusted the SQL compilation for string containment functions + ``.contains()``, ``.startswith()``, ``.endswith()`` to force the use of the + string concatenation operator, rather than relying upon the overload of the + addition operator, so that non-standard use of these operators with for + example bytestrings still produces string concatenation operators. 
+ diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 667dd7d3de5..330f3c3bc86 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -2295,37 +2295,37 @@ def _like_percent_literal(self): def visit_contains_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal - binary.right = percent.__add__(binary.right).__add__(percent) + binary.right = percent.concat(binary.right).concat(percent) return self.visit_like_op_binary(binary, operator, **kw) def visit_not_contains_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal - binary.right = percent.__add__(binary.right).__add__(percent) + binary.right = percent.concat(binary.right).concat(percent) return self.visit_not_like_op_binary(binary, operator, **kw) def visit_startswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal - binary.right = percent.__radd__(binary.right) + binary.right = percent._rconcat(binary.right) return self.visit_like_op_binary(binary, operator, **kw) def visit_not_startswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal - binary.right = percent.__radd__(binary.right) + binary.right = percent._rconcat(binary.right) return self.visit_not_like_op_binary(binary, operator, **kw) def visit_endswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal - binary.right = percent.__add__(binary.right) + binary.right = percent.concat(binary.right) return self.visit_like_op_binary(binary, operator, **kw) def visit_not_endswith_op_binary(self, binary, operator, **kw): binary = binary._clone() percent = self._like_percent_literal - binary.right = percent.__add__(binary.right) + binary.right = percent.concat(binary.right) return self.visit_not_like_op_binary(binary, operator, **kw) def 
visit_like_op_binary(self, binary, operator, **kw): diff --git a/lib/sqlalchemy/sql/operators.py b/lib/sqlalchemy/sql/operators.py index 826b3129384..1da50322967 100644 --- a/lib/sqlalchemy/sql/operators.py +++ b/lib/sqlalchemy/sql/operators.py @@ -466,6 +466,16 @@ def concat(self, other): """ return self.operate(concat_op, other) + def _rconcat(self, other): + """Implement an 'rconcat' operator. + + this is for internal use at the moment + + .. versionadded:: 1.4.40 + + """ + return self.reverse_operate(concat_op, other) + def like(self, other, escape=None): r"""Implement the ``like`` operator. @@ -1512,7 +1522,12 @@ def filter_op(a, b): def concat_op(a, b): - return a.concat(b) + try: + concat = a.concat + except AttributeError: + return b._rconcat(a) + else: + return concat(b) def desc_op(a): diff --git a/test/sql/test_operators.py b/test/sql/test_operators.py index 116d6b79232..62f33c2ec24 100644 --- a/test/sql/test_operators.py +++ b/test/sql/test_operators.py @@ -2841,6 +2841,36 @@ def test_contains(self): checkparams={"x_1": "y"}, ) + def test_contains_encoded(self): + self.assert_compile( + column("x").contains(b"y"), + "x LIKE '%' || :x_1 || '%'", + checkparams={"x_1": b"y"}, + ) + + def test_not_contains_encoded(self): + self.assert_compile( + ~column("x").contains(b"y"), + "x NOT LIKE '%' || :x_1 || '%'", + checkparams={"x_1": b"y"}, + ) + + def test_contains_encoded_mysql(self): + self.assert_compile( + column("x").contains(b"y"), + "x LIKE concat(concat('%%', %s), '%%')", + checkparams={"x_1": b"y"}, + dialect="mysql", + ) + + def test_not_contains_encoded_mysql(self): + self.assert_compile( + ~column("x").contains(b"y"), + "x NOT LIKE concat(concat('%%', %s), '%%')", + checkparams={"x_1": b"y"}, + dialect="mysql", + ) + def test_contains_escape(self): self.assert_compile( column("x").contains("a%b_c", escape="\\"), @@ -3004,6 +3034,36 @@ def test_startswith_autoescape_custom_escape(self): checkparams={"x_1": "a^%b^_c/d^^e"}, ) + def 
test_startswith_encoded(self): + self.assert_compile( + column("x").startswith(b"y"), + "x LIKE :x_1 || '%'", + checkparams={"x_1": b"y"}, + ) + + def test_startswith_encoded_mysql(self): + self.assert_compile( + column("x").startswith(b"y"), + "x LIKE concat(%s, '%%')", + checkparams={"x_1": b"y"}, + dialect="mysql", + ) + + def test_not_startswith_encoded(self): + self.assert_compile( + ~column("x").startswith(b"y"), + "x NOT LIKE :x_1 || '%'", + checkparams={"x_1": b"y"}, + ) + + def test_not_startswith_encoded_mysql(self): + self.assert_compile( + ~column("x").startswith(b"y"), + "x NOT LIKE concat(%s, '%%')", + checkparams={"x_1": b"y"}, + dialect="mysql", + ) + def test_not_startswith(self): self.assert_compile( ~column("x").startswith("y"), @@ -3094,6 +3154,28 @@ def test_endswith(self): checkparams={"x_1": "y"}, ) + def test_endswith_encoded(self): + self.assert_compile( + column("x").endswith(b"y"), + "x LIKE '%' || :x_1", + checkparams={"x_1": b"y"}, + ) + + def test_endswith_encoded_mysql(self): + self.assert_compile( + column("x").endswith(b"y"), + "x LIKE concat('%%', %s)", + checkparams={"x_1": b"y"}, + dialect="mysql", + ) + + def test_not_endswith_encoded(self): + self.assert_compile( + ~column("x").endswith(b"y"), + "x NOT LIKE '%' || :x_1", + checkparams={"x_1": b"y"}, + ) + def test_endswith_escape(self): self.assert_compile( column("x").endswith("a%b_c", escape="\\"), From 6d0570de2b43463b82fbf6628610bf3c89befd59 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Fri, 17 Jun 2022 23:12:39 +0200 Subject: [PATCH 311/632] add shield() in aexit Added ``asyncio.shield()`` to the connection and session release process specifically within the ``__aexit__()`` context manager exit, when using :class:`.AsyncConnection` or :class:`.AsyncSession` as a context manager that releases the object when the context manager is complete. 
This appears to help with task cancellation when using alternate concurrency libraries such as ``anyio``, ``uvloop`` that otherwise don't provide an async context for the connection pool to release the connection properly during task cancellation. Fixes: #8145 Change-Id: I0b1ea9c3a22a18619341cbb8591225fcd339042c (cherry picked from commit 1acaf0b2e4859a274e753b5054dcde3d5c7ca10e) --- doc/build/changelog/unreleased_14/8145.rst | 14 ++++++++++++++ lib/sqlalchemy/ext/asyncio/engine.py | 13 +++++++++---- lib/sqlalchemy/ext/asyncio/session.py | 14 ++++++++++---- 3 files changed, 33 insertions(+), 8 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8145.rst diff --git a/doc/build/changelog/unreleased_14/8145.rst b/doc/build/changelog/unreleased_14/8145.rst new file mode 100644 index 00000000000..4cd6c12a588 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8145.rst @@ -0,0 +1,14 @@ +.. change:: + :tags: bug, asyncio + :tickets: 8145 + + Added ``asyncio.shield()`` to the connection and session release process + specifically within the ``__aexit__()`` context manager exit, when using + :class:`.AsyncConnection` or :class:`.AsyncSession` as a context manager + that releases the object when the context manager is complete. This appears + to help with task cancellation when using alternate concurrency libraries + such as ``anyio``, ``uvloop`` that otherwise don't provide an async context + for the connection pool to release the connection properly during task + cancellation. + + diff --git a/lib/sqlalchemy/ext/asyncio/engine.py b/lib/sqlalchemy/ext/asyncio/engine.py index 63d148c9757..4fbe4f7a592 100644 --- a/lib/sqlalchemy/ext/asyncio/engine.py +++ b/lib/sqlalchemy/ext/asyncio/engine.py @@ -4,6 +4,8 @@ # # This module is part of SQLAlchemy and is released under # the MIT License: https://www.opensource.org/licenses/mit-license.php +import asyncio + from . 
import exc as async_exc from .base import ProxyComparable from .base import StartableContext @@ -549,7 +551,7 @@ def __await__(self): return self.start().__await__() async def __aexit__(self, type_, value, traceback): - await self.close() + await asyncio.shield(self.close()) @util.create_proxy_methods( @@ -600,8 +602,11 @@ async def start(self, is_ctxmanager=False): return self.conn async def __aexit__(self, type_, value, traceback): - await self.transaction.__aexit__(type_, value, traceback) - await self.conn.close() + async def go(): + await self.transaction.__aexit__(type_, value, traceback) + await self.conn.close() + + await asyncio.shield(go()) def __init__(self, sync_engine): if not sync_engine.dialect.is_async: @@ -698,7 +703,7 @@ async def dispose(self): """ - return await greenlet_spawn(self.sync_engine.dispose) + await greenlet_spawn(self.sync_engine.dispose) class AsyncTransaction(ProxyComparable, StartableContext): diff --git a/lib/sqlalchemy/ext/asyncio/session.py b/lib/sqlalchemy/ext/asyncio/session.py index ce6a0db090c..378cbcbf2f8 100644 --- a/lib/sqlalchemy/ext/asyncio/session.py +++ b/lib/sqlalchemy/ext/asyncio/session.py @@ -4,6 +4,9 @@ # # This module is part of SQLAlchemy and is released under # the MIT License: https://www.opensource.org/licenses/mit-license.php + +import asyncio + from . import engine from . import result as _result from .base import ReversibleProxy @@ -607,7 +610,7 @@ async def close(self): :meth:`_asyncio.AsyncSession.close` """ - return await greenlet_spawn(self.sync_session.close) + await greenlet_spawn(self.sync_session.close) async def invalidate(self): """Close this Session, using connection invalidation. 
@@ -625,7 +628,7 @@ async def __aenter__(self): return self async def __aexit__(self, type_, value, traceback): - await self.close() + await asyncio.shield(self.close()) def _maker_context_manager(self): # no @contextlib.asynccontextmanager until python3.7, gr @@ -642,8 +645,11 @@ async def __aenter__(self): return self.async_session async def __aexit__(self, type_, value, traceback): - await self.trans.__aexit__(type_, value, traceback) - await self.async_session.__aexit__(type_, value, traceback) + async def go(): + await self.trans.__aexit__(type_, value, traceback) + await self.async_session.__aexit__(type_, value, traceback) + + await asyncio.shield(go()) class AsyncSessionTransaction(ReversibleProxy, StartableContext): From 59fbe5d57ef11783936add7e2a59348e7f7f5ad3 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 19 Jul 2022 11:13:59 -0400 Subject: [PATCH 312/632] render all three elements selected in bundle example Fixes: #8013 Change-Id: I9b5f800d94abd80d07ca5f58c24f111618415674 (cherry picked from commit eefc8c985400cd458e561d61299a2b81bdff1189) --- doc/build/orm/queryguide.rst | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/doc/build/orm/queryguide.rst b/doc/build/orm/queryguide.rst index 747a70abbf9..9bc585d3997 100644 --- a/doc/build/orm/queryguide.rst +++ b/doc/build/orm/queryguide.rst @@ -242,16 +242,15 @@ allows sets of column expressions to be grouped in result rows:: ... Bundle("email", Address.email_address) ... ).join_from(User, Address) {sql}>>> for row in session.execute(stmt): - ... print(f"{row.user.name} {row.email.email_address}") + ... print(f"{row.user.name} {row.user.fullname} {row.email.email_address}") SELECT user_account.name, user_account.fullname, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id [...] 
(){stop} - spongebob spongebob@sqlalchemy.org - sandy sandy@sqlalchemy.org - sandy squirrel@squirrelpower.org - patrick pat999@aol.com - squidward stentcl@sqlalchemy.org - + spongebob Spongebob Squarepants spongebob@sqlalchemy.org + sandy Sandy Cheeks sandy@sqlalchemy.org + sandy Sandy Cheeks squirrel@squirrelpower.org + patrick Patrick Star pat999@aol.com + squidward Squidward Tentacles stentcl@sqlalchemy.org The :class:`_orm.Bundle` is potentially useful for creating lightweight views as well as custom column groupings such as mappings. From 81d20ae7f23c9e3f3487dc91687f934ee6ae124c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 19 Jul 2022 10:50:05 -0400 Subject: [PATCH 313/632] check for TypeDecorator when handling getitem Fixed issue where :class:`.TypeDecorator` would not correctly proxy the ``__getitem__()`` operator when decorating the :class:`.ARRAY` datatype, without explicit workarounds. Fixes: #7249 Change-Id: I3273572b4757e41fb5952639cb867314227d368a (cherry picked from commit 1e01fab7e600c53284eabceceab5706e4074eb2e) --- doc/build/changelog/unreleased_14/7249.rst | 7 ++ lib/sqlalchemy/sql/default_comparator.py | 6 +- lib/sqlalchemy/sql/type_api.py | 23 +++- test/sql/test_types.py | 130 +++++++++++++++++++++ 4 files changed, 164 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7249.rst diff --git a/doc/build/changelog/unreleased_14/7249.rst b/doc/build/changelog/unreleased_14/7249.rst new file mode 100644 index 00000000000..5d0cb658187 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7249.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: bug, types + :tickets: 7249 + + Fixed issue where :class:`.TypeDecorator` would not correctly proxy the + ``__getitem__()`` operator when decorating the :class:`.ARRAY` datatype, + without explicit workarounds. 
diff --git a/lib/sqlalchemy/sql/default_comparator.py b/lib/sqlalchemy/sql/default_comparator.py index 7d2f1dd2a4a..70586c696f0 100644 --- a/lib/sqlalchemy/sql/default_comparator.py +++ b/lib/sqlalchemy/sql/default_comparator.py @@ -168,7 +168,11 @@ def _in_impl(expr, op, seq_or_selectable, negate_op, **kw): def _getitem_impl(expr, op, other, **kw): - if isinstance(expr.type, type_api.INDEXABLE): + if ( + isinstance(expr.type, type_api.INDEXABLE) + or isinstance(expr.type, type_api.TypeDecorator) + and isinstance(expr.type.impl, type_api.INDEXABLE) + ): other = coercions.expect( roles.BinaryElementRole, other, expr=expr, operator=op ) diff --git a/lib/sqlalchemy/sql/type_api.py b/lib/sqlalchemy/sql/type_api.py index 7431c08a41d..172ce0d884e 100644 --- a/lib/sqlalchemy/sql/type_api.py +++ b/lib/sqlalchemy/sql/type_api.py @@ -1265,8 +1265,11 @@ def coerce_compared_value(self, op, value): the default rules of :meth:`.TypeEngine.coerce_compared_value` should be used in order to deal with operators like index operations:: + from sqlalchemy import JSON + from sqlalchemy import TypeDecorator + class MyJsonType(TypeDecorator): - impl = postgresql.JSON + impl = JSON cache_ok = True @@ -1276,6 +1279,24 @@ def coerce_compared_value(self, op, value): Without the above step, index operations such as ``mycol['foo']`` will cause the index value ``'foo'`` to be JSON encoded. + Similarly, when working with the :class:`.ARRAY` datatype, the + type coercion for index operations (e.g. 
``mycol[5]``) is also + handled by :meth:`.TypeDecorator.coerce_compared_value`, where + again a simple override is sufficient unless special rules are needed + for particular operators:: + + from sqlalchemy import ARRAY + from sqlalchemy import TypeDecorator + + class MyArrayType(TypeDecorator): + impl = ARRAY + + cache_ok = True + + def coerce_compared_value(self, op, value): + return self.impl.coerce_compared_value(op, value) + + """ __visit_name__ = "type_decorator" diff --git a/test/sql/test_types.py b/test/sql/test_types.py index 12932a1c9c7..c4f2f27260a 100644 --- a/test/sql/test_types.py +++ b/test/sql/test_types.py @@ -784,6 +784,136 @@ def test_expanding_in_typedec_of_typedec(self, connection): eq_(result.fetchall(), [(3, 1500), (4, 900)]) +class TypeDecoratorSpecialCasesTest(AssertsCompiledSQL, fixtures.TestBase): + __backend__ = True + + @testing.requires.array_type + def test_typedec_of_array_modified(self, metadata, connection): + """test #7249""" + + class SkipsFirst(TypeDecorator): # , Indexable): + impl = ARRAY(Integer, zero_indexes=True) + + cache_ok = True + + def process_bind_param(self, value, dialect): + return value[1:] + + def copy(self, **kw): + return SkipsFirst(**kw) + + def coerce_compared_value(self, op, value): + return self.impl.coerce_compared_value(op, value) + + t = Table( + "t", + metadata, + Column("id", Integer, primary_key=True), + Column("data", SkipsFirst), + ) + t.create(connection) + + connection.execute(t.insert(), {"data": [1, 2, 3]}) + val = connection.scalar(select(t.c.data)) + eq_(val, [2, 3]) + + val = connection.scalar(select(t.c.data[0])) + eq_(val, 2) + + def test_typedec_of_array_ops(self): + class ArrayDec(TypeDecorator): + impl = ARRAY(Integer, zero_indexes=True) + + cache_ok = True + + def coerce_compared_value(self, op, value): + return self.impl.coerce_compared_value(op, value) + + expr1 = column("q", ArrayDec)[0] + expr2 = column("q", ARRAY(Integer, zero_indexes=True))[0] + + 
eq_(expr1.right.type._type_affinity, Integer) + eq_(expr2.right.type._type_affinity, Integer) + + self.assert_compile( + column("q", ArrayDec).any(7, operator=operators.lt), + "%(q_1)s < ANY (q)", + dialect="postgresql", + ) + + self.assert_compile( + column("q", ArrayDec)[5], "q[%(q_1)s]", dialect="postgresql" + ) + + def test_typedec_of_json_ops(self): + class JsonDec(TypeDecorator): + impl = JSON() + + cache_ok = True + + self.assert_compile( + column("q", JsonDec)["q"], "q -> %(q_1)s", dialect="postgresql" + ) + + self.assert_compile( + column("q", JsonDec)["q"].as_integer(), + "CAST(q ->> %(q_1)s AS INTEGER)", + dialect="postgresql", + ) + + @testing.requires.array_type + def test_typedec_of_array(self, metadata, connection): + """test #7249""" + + class ArrayDec(TypeDecorator): + impl = ARRAY(Integer, zero_indexes=True) + + cache_ok = True + + def coerce_compared_value(self, op, value): + return self.impl.coerce_compared_value(op, value) + + t = Table( + "t", + metadata, + Column("id", Integer, primary_key=True), + Column("data", ArrayDec), + ) + + t.create(connection) + + connection.execute(t.insert(), {"data": [1, 2, 3]}) + val = connection.scalar(select(t.c.data)) + eq_(val, [1, 2, 3]) + + val = connection.scalar(select(t.c.data[0])) + eq_(val, 1) + + @testing.requires.json_type + def test_typedec_of_json(self, metadata, connection): + """test #7249""" + + class JsonDec(TypeDecorator): + impl = JSON() + + cache_ok = True + + t = Table( + "t", + metadata, + Column("id", Integer, primary_key=True), + Column("data", JsonDec), + ) + t.create(connection) + + connection.execute(t.insert(), {"data": {"key": "value"}}) + val = connection.scalar(select(t.c.data)) + eq_(val, {"key": "value"}) + + val = connection.scalar(select(t.c.data["key"].as_string())) + eq_(val, "value") + + class BindProcessorInsertValuesTest(UserDefinedRoundTripTest): """related to #6770, test that insert().values() applies to bound parameter handlers including the None value.""" From 
742ca44700b5c1a715fb18f50905c64435e79474 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 21 Jul 2022 09:41:50 -0400 Subject: [PATCH 314/632] clarify update perf test has only one test so far this was pretty misleading as it shows up first in the file listing Change-Id: I6a92820e487a04632b651f9f6c631b32e338c043 (cherry picked from commit b9043754f039ff5b2bdf2379bd3d89eadd81e96d) --- examples/performance/bulk_updates.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/performance/bulk_updates.py b/examples/performance/bulk_updates.py index 0657c96f326..c15d0f16726 100644 --- a/examples/performance/bulk_updates.py +++ b/examples/performance/bulk_updates.py @@ -1,5 +1,5 @@ -"""This series of tests illustrates different ways to UPDATE a large number -of rows in bulk. +"""This series of tests will illustrate different ways to UPDATE a large number +of rows in bulk (under construction! there's just one test at the moment) """ From 9c9d88d546829f6aca48fb421c1484f25828160a Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 23 Jul 2022 10:18:06 -0400 Subject: [PATCH 315/632] remove mypy_path workaround and ensure messages received Fixes: #8281 Change-Id: Ice47880ba7924daff68aef6b1791f3c66849f550 (cherry picked from commit 4fe222d9412df30fc15ace3d7a7fd4365eb9e05a) --- test/ext/mypy/files/relationship_err3.py | 2 +- test/ext/mypy/files/typing_err3.py | 1 - test/ext/mypy/test_mypy_plugin_py3k.py | 10 +++++++++- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/test/ext/mypy/files/relationship_err3.py b/test/ext/mypy/files/relationship_err3.py index aa76ae1f0e0..1c7cd9f303d 100644 --- a/test/ext/mypy/files/relationship_err3.py +++ b/test/ext/mypy/files/relationship_err3.py @@ -24,7 +24,7 @@ class A(Base): id = Column(Integer, primary_key=True) data = Column(String) - # EXPECTED: Left hand assignment 'bs: "Set[B]"' not compatible with ORM mapped expression of type "Mapped[List[B]]" # noqa + bs: Set[B] = relationship(B, 
uselist=True, back_populates="a") # EXPECTED: Left hand assignment 'another_bs: "Set[B]"' not compatible with ORM mapped expression of type "Mapped[B]" # noqa diff --git a/test/ext/mypy/files/typing_err3.py b/test/ext/mypy/files/typing_err3.py index 5383f89560c..3fd1f86ff60 100644 --- a/test/ext/mypy/files/typing_err3.py +++ b/test/ext/mypy/files/typing_err3.py @@ -22,7 +22,6 @@ class User(Base): id = Column(Integer, primary_key=True) - # EXPECTED_MYPY: Unexpected keyword argument "wrong_arg" for "RelationshipProperty" # noqa addresses: Mapped[List["Address"]] = relationship( "Address", wrong_arg="imwrong" ) diff --git a/test/ext/mypy/test_mypy_plugin_py3k.py b/test/ext/mypy/test_mypy_plugin_py3k.py index 5a6a2972732..181a7958f3f 100644 --- a/test/ext/mypy/test_mypy_plugin_py3k.py +++ b/test/ext/mypy/test_mypy_plugin_py3k.py @@ -65,7 +65,8 @@ def run(path, use_plugin=True, incremental=False): args.append(path) - return api.run(args) + result = api.run(args) + return result return run @@ -181,6 +182,8 @@ def test_mypy(self, mypy_runner, path): result = mypy_runner(path, use_plugin=use_plugin) + not_located = [] + if expected_errors: eq_(result[2], 1, msg=result) @@ -201,9 +204,14 @@ def test_mypy(self, mypy_runner, path): ): break else: + not_located.append(msg) continue del errors[idx] + if not_located: + print(f"Couldn't locate expected messages: {not_located}") + assert False, "expected messages not found, see stdout" + assert not errors, "errors remain: %s" % "\n".join(errors) else: From b06b2e865856dc4589b6d48944a8d73e2c2b4b37 Mon Sep 17 00:00:00 2001 From: Paul Lettich Date: Tue, 26 Jul 2022 23:28:28 +0200 Subject: [PATCH 316/632] Update metadata.rst (#8290) add missing colon in docs (cherry picked from commit 2ab519f59cf81307966dba3d5b8a176d45deb297) --- doc/build/core/metadata.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/core/metadata.rst b/doc/build/core/metadata.rst index fa1872d68d0..701146195a7 100644 --- 
a/doc/build/core/metadata.rst +++ b/doc/build/core/metadata.rst @@ -536,7 +536,7 @@ Schemas and Reflection ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The schema feature of SQLAlchemy interacts with the table reflection -feature introduced at ref:`metadata_reflection_toplevel`. See the section +feature introduced at :ref:`metadata_reflection_toplevel`. See the section :ref:`metadata_reflection_schemas` for additional details on how this works. From 379e32fbda3c30c3935e523579e39b99a06401fa Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 30 Jul 2022 00:30:28 -0400 Subject: [PATCH 317/632] link to index correctly Fixes: #8303 Change-Id: If3568309e4dd3e9ef715b32f9ad90eeba7f662e8 (cherry picked from commit 0c30dcfb4ba45400cf7df9056c53e4ad7fef1ad2) --- doc/build/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/index.rst b/doc/build/index.rst index 4a7059029ba..361ccfa5d46 100644 --- a/doc/build/index.rst +++ b/doc/build/index.rst @@ -173,4 +173,4 @@ SQLAlchemy Documentation * :doc:`Glossary ` - Terms used in SQLAlchemy's documentation * :doc:`Error Message Guide ` - Explainations of many SQLAlchemy Errors * :doc:`Complete table of of contents ` - * :doc:`Index ` + * :ref:`Index ` From c8f7ec79d29620461bcd64bf6e30bcf243c1d7e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Prei=C3=9F?= Date: Sat, 30 Jul 2022 12:47:15 +0200 Subject: [PATCH 318/632] glossary: primary_key needs value (#8298) (cherry picked from commit 8076f9f590bdf9d7d138bae3408b81e59771dbd5) --- doc/build/glossary.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/build/glossary.rst b/doc/build/glossary.rst index c05cedc3299..b7f39b6832a 100644 --- a/doc/build/glossary.rst +++ b/doc/build/glossary.rst @@ -1198,7 +1198,7 @@ Glossary class Employee(Base): __tablename__ = 'employee' - id = Column(Integer, primary_key) + id = Column(Integer, primary_key=True) name = Column(String(30)) projects = relationship( @@ -1215,7 +1215,7 @@ Glossary class 
Project(Base): __tablename__ = 'project' - id = Column(Integer, primary_key) + id = Column(Integer, primary_key=True) name = Column(String(30)) Above, the ``Employee.projects`` and back-referencing ``Project.employees`` @@ -1311,14 +1311,14 @@ Glossary class Employee(Base): __tablename__ = 'employee' - id = Column(Integer, primary_key) + id = Column(Integer, primary_key=True) name = Column(String(30)) class Project(Base): __tablename__ = 'project' - id = Column(Integer, primary_key) + id = Column(Integer, primary_key=True) name = Column(String(30)) From 02c83a42d760975dc7d41158377290f2db281e75 Mon Sep 17 00:00:00 2001 From: bbben <70356237+bb-ben@users.noreply.github.com> Date: Sun, 31 Jul 2022 02:43:59 +0800 Subject: [PATCH 319/632] glossary: update the acronym definition (#8306) * glossary: fix typo * add 'Read' to the CRUD definition (cherry picked from commit b86112fd85c7810424308d3864a67462fbc9288c) --- doc/build/glossary.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/build/glossary.rst b/doc/build/glossary.rst index b7f39b6832a..28456cd16cb 100644 --- a/doc/build/glossary.rst +++ b/doc/build/glossary.rst @@ -314,8 +314,8 @@ Glossary An acronym for **Data Manipulation Language**. DML is the subset of SQL that relational databases use to *modify* the data in tables. DML typically refers to the three widely familiar statements of INSERT, - UPDATE and DELETE, otherwise known as :term:`CRUD` (acronym for "CReate, - Update, Delete"). + UPDATE and DELETE, otherwise known as :term:`CRUD` (acronym for "Create, + Read, Update, Delete"). .. seealso:: From 7014e04999045a605bab4a5e024bdfc6dd929985 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 30 Jul 2022 15:14:23 -0400 Subject: [PATCH 320/632] use inherited members for Query not sure why these four methods were here, they don't get sorted when doing it this way. 
Change-Id: I554f132df3f299858ca5b451a79fbd9dd1f520ee (cherry picked from commit 14bfbadfdf9260a1c40f63b31641b27fe9de12a0) --- doc/build/orm/query.rst | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/doc/build/orm/query.rst b/doc/build/orm/query.rst index 498679ea9eb..29df9f31d12 100644 --- a/doc/build/orm/query.rst +++ b/doc/build/orm/query.rst @@ -20,14 +20,7 @@ Following is the full interface for the :class:`_query.Query` object. .. autoclass:: sqlalchemy.orm.Query :members: - - .. automethod:: sqlalchemy.orm.Query.prefix_with - - .. automethod:: sqlalchemy.orm.Query.suffix_with - - .. automethod:: sqlalchemy.orm.Query.with_hint - - .. automethod:: sqlalchemy.orm.Query.with_statement_hint + :inherited-members: ORM-Specific Query Constructs ============================= From cde43899d6c872f6d58354f7324f707b9c8bcd8e Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Sun, 31 Jul 2022 11:56:07 +0200 Subject: [PATCH 321/632] Update to flake8 5. Change-Id: I5a241a70efba68bcea9819ddce6aebc25703e68d (cherry picked from commit f8c4dba4e9f130c18ce00597c036bc26ae7abf90) --- .pre-commit-config.yaml | 6 +---- lib/sqlalchemy/dialects/mysql/dml.py | 3 ++- lib/sqlalchemy/engine/cursor.py | 6 +++-- lib/sqlalchemy/engine/result.py | 3 ++- lib/sqlalchemy/engine/row.py | 3 ++- lib/sqlalchemy/ext/asyncio/result.py | 8 +++--- lib/sqlalchemy/ext/mypy/util.py | 5 ++-- lib/sqlalchemy/orm/attributes.py | 3 ++- lib/sqlalchemy/orm/events.py | 30 +++++++++++++++-------- lib/sqlalchemy/sql/dml.py | 4 +-- lib/sqlalchemy/sql/functions.py | 3 ++- lib/sqlalchemy/sql/selectable.py | 17 +++++++------ lib/sqlalchemy/sql/type_api.py | 10 +++++--- lib/sqlalchemy/testing/requirements.py | 5 ++-- test/orm/inheritance/test_basic.py | 4 +-- test/orm/inheritance/test_relationship.py | 6 ++--- test/orm/test_assorted_eager.py | 3 ++- test/orm/test_deprecations.py | 4 +-- test/orm/test_manytomany.py | 5 ++-- test/requirements.py | 5 ++-- 20 files changed, 75 insertions(+), 58 
deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 94ac2c6876b..91b12737486 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -14,7 +14,7 @@ repos: - id: zimports - repo: https://github.com/pycqa/flake8 - rev: 3.9.2 + rev: 5.0.0 hooks: - id: flake8 additional_dependencies: @@ -26,7 +26,3 @@ repos: # in case it requires a version pin - pydocstyle - pygments - - - - diff --git a/lib/sqlalchemy/dialects/mysql/dml.py b/lib/sqlalchemy/dialects/mysql/dml.py index 0b508fe49de..0c8791a0d7b 100644 --- a/lib/sqlalchemy/dialects/mysql/dml.py +++ b/lib/sqlalchemy/dialects/mysql/dml.py @@ -29,7 +29,8 @@ class Insert(StandardInsert): @property def inserted(self): - """Provide the "inserted" namespace for an ON DUPLICATE KEY UPDATE statement + """Provide the "inserted" namespace for an ON DUPLICATE KEY UPDATE + statement MySQL's ON DUPLICATE KEY UPDATE clause allows reference to the row that would be inserted, via a special function called ``VALUES()``. diff --git a/lib/sqlalchemy/engine/cursor.py b/lib/sqlalchemy/engine/cursor.py index abe58e2fde1..774916d95df 100644 --- a/lib/sqlalchemy/engine/cursor.py +++ b/lib/sqlalchemy/engine/cursor.py @@ -1371,7 +1371,8 @@ def _soft_close(self, hard=False): @property def inserted_primary_key_rows(self): - """Return the value of :attr:`_engine.CursorResult.inserted_primary_key` + """Return the value of + :attr:`_engine.CursorResult.inserted_primary_key` as a row contained within a list; some dialects may support a multiple row form as well. @@ -1712,7 +1713,8 @@ def lastrowid(self): @property def returns_rows(self): - """True if this :class:`_engine.CursorResult` returns zero or more rows. + """True if this :class:`_engine.CursorResult` returns zero or more + rows. I.e. 
if it is legal to call the methods :meth:`_engine.CursorResult.fetchone`, diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py index 912dccf4bf8..1fd4e1c92f2 100644 --- a/lib/sqlalchemy/engine/result.py +++ b/lib/sqlalchemy/engine/result.py @@ -1768,7 +1768,8 @@ def null_result(): class ChunkedIteratorResult(IteratorResult): - """An :class:`.IteratorResult` that works from an iterator-producing callable. + """An :class:`.IteratorResult` that works from an iterator-producing + callable. The given ``chunks`` argument is a function that is given a number of rows to return in each chunk, or ``None`` for all rows. The function should diff --git a/lib/sqlalchemy/engine/row.py b/lib/sqlalchemy/engine/row.py index e6c32977391..e80e8c6bec9 100644 --- a/lib/sqlalchemy/engine/row.py +++ b/lib/sqlalchemy/engine/row.py @@ -549,7 +549,8 @@ def __ne__(self, other): class RowMapping(BaseRow, collections_abc.Mapping): - """A ``Mapping`` that maps column names and objects to :class:`.Row` values. + """A ``Mapping`` that maps column names and objects to :class:`.Row` + values. The :class:`.RowMapping` is available from a :class:`.Row` via the :attr:`.Row._mapping` attribute, as well as from the iterable interface diff --git a/lib/sqlalchemy/ext/asyncio/result.py b/lib/sqlalchemy/ext/asyncio/result.py index c69fe191bec..a77b6a8c943 100644 --- a/lib/sqlalchemy/ext/asyncio/result.py +++ b/lib/sqlalchemy/ext/asyncio/result.py @@ -323,8 +323,8 @@ async def freeze(self): return await greenlet_spawn(FrozenResult, self) def merge(self, *others): - """Merge this :class:`_asyncio.AsyncResult` with other compatible result - objects. + """Merge this :class:`_asyncio.AsyncResult` with other compatible + result objects. 
The object returned is an instance of :class:`_engine.MergedResult`, which will be composed of iterators from the given result @@ -495,8 +495,8 @@ async def one(self): class AsyncMappingResult(AsyncCommon): - """A wrapper for a :class:`_asyncio.AsyncResult` that returns dictionary values - rather than :class:`_engine.Row` values. + """A wrapper for a :class:`_asyncio.AsyncResult` that returns dictionary + values rather than :class:`_engine.Row` values. The :class:`_asyncio.AsyncMappingResult` object is acquired by calling the :meth:`_asyncio.AsyncResult.mappings` method. diff --git a/lib/sqlalchemy/ext/mypy/util.py b/lib/sqlalchemy/ext/mypy/util.py index 4d55cb72833..16b365e1eee 100644 --- a/lib/sqlalchemy/ext/mypy/util.py +++ b/lib/sqlalchemy/ext/mypy/util.py @@ -64,8 +64,9 @@ def serialize(self) -> JsonDict: } def expand_typevar_from_subtype(self, sub_type: TypeInfo) -> None: - """Expands type vars in the context of a subtype when an attribute is inherited - from a generic super type.""" + """Expands type vars in the context of a subtype when an attribute is + inherited from a generic super type. + """ if not isinstance(self.type, TypeVarType): return diff --git a/lib/sqlalchemy/orm/attributes.py b/lib/sqlalchemy/orm/attributes.py index 2a1d3a2c3a2..efa20fb1cd1 100644 --- a/lib/sqlalchemy/orm/attributes.py +++ b/lib/sqlalchemy/orm/attributes.py @@ -376,7 +376,8 @@ def _queryable_attribute_unreduce(key, mapped_class, parententity, entity): class Mapped(QueryableAttribute, _Generic_T): - """Represent an ORM mapped :term:`descriptor` attribute for typing purposes. + """Represent an ORM mapped :term:`descriptor` attribute for typing + purposes. 
This class represents the complete descriptor interface for any class attribute that will have been :term:`instrumented` by the ORM diff --git a/lib/sqlalchemy/orm/events.py b/lib/sqlalchemy/orm/events.py index 2bef6394d42..39659c72325 100644 --- a/lib/sqlalchemy/orm/events.py +++ b/lib/sqlalchemy/orm/events.py @@ -1895,7 +1895,8 @@ def after_bulk_delete(self, delete_context): @_lifecycle_event def transient_to_pending(self, session, instance): - """Intercept the "transient to pending" transition for a specific object. + """Intercept the "transient to pending" transition for a specific + object. This event is a specialization of the :meth:`.SessionEvents.after_attach` event which is only invoked @@ -1916,7 +1917,8 @@ def transient_to_pending(self, session, instance): @_lifecycle_event def pending_to_transient(self, session, instance): - """Intercept the "pending to transient" transition for a specific object. + """Intercept the "pending to transient" transition for a specific + object. This less common transition occurs when an pending object that has not been flushed is evicted from the session; this can occur @@ -1937,7 +1939,8 @@ def pending_to_transient(self, session, instance): @_lifecycle_event def persistent_to_transient(self, session, instance): - """Intercept the "persistent to transient" transition for a specific object. + """Intercept the "persistent to transient" transition for a specific + object. This less common transition occurs when an pending object that has has been flushed is evicted from the session; this can occur @@ -1957,7 +1960,8 @@ def persistent_to_transient(self, session, instance): @_lifecycle_event def pending_to_persistent(self, session, instance): - """Intercept the "pending to persistent"" transition for a specific object. + """Intercept the "pending to persistent"" transition for a specific + object. 
This event is invoked within the flush process, and is similar to scanning the :attr:`.Session.new` collection within @@ -1979,7 +1983,8 @@ def pending_to_persistent(self, session, instance): @_lifecycle_event def detached_to_persistent(self, session, instance): - """Intercept the "detached to persistent" transition for a specific object. + """Intercept the "detached to persistent" transition for a specific + object. This event is a specialization of the :meth:`.SessionEvents.after_attach` event which is only invoked @@ -2015,7 +2020,8 @@ def detached_to_persistent(self, session, instance): @_lifecycle_event def loaded_as_persistent(self, session, instance): - """Intercept the "loaded as persistent" transition for a specific object. + """Intercept the "loaded as persistent" transition for a specific + object. This event is invoked within the ORM loading process, and is invoked very similarly to the :meth:`.InstanceEvents.load` event. However, @@ -2050,7 +2056,8 @@ def loaded_as_persistent(self, session, instance): @_lifecycle_event def persistent_to_deleted(self, session, instance): - """Intercept the "persistent to deleted" transition for a specific object. + """Intercept the "persistent to deleted" transition for a specific + object. This event is invoked when a persistent object's identity is deleted from the database within a flush, however the object @@ -2082,7 +2089,8 @@ def persistent_to_deleted(self, session, instance): @_lifecycle_event def deleted_to_persistent(self, session, instance): - """Intercept the "deleted to persistent" transition for a specific object. + """Intercept the "deleted to persistent" transition for a specific + object. 
This transition occurs only when an object that's been deleted successfully in a flush is restored due to a call to @@ -2099,7 +2107,8 @@ def deleted_to_persistent(self, session, instance): @_lifecycle_event def deleted_to_detached(self, session, instance): - """Intercept the "deleted to detached" transition for a specific object. + """Intercept the "deleted to detached" transition for a specific + object. This event is invoked when a deleted object is evicted from the session. The typical case when this occurs is when @@ -2122,7 +2131,8 @@ def deleted_to_detached(self, session, instance): @_lifecycle_event def persistent_to_detached(self, session, instance): - """Intercept the "persistent to detached" transition for a specific object. + """Intercept the "persistent to detached" transition for a specific + object. This event is invoked when a persistent object is evicted from the session. There are many conditions that cause this diff --git a/lib/sqlalchemy/sql/dml.py b/lib/sqlalchemy/sql/dml.py index 4a343147c96..07a4d7b2d58 100644 --- a/lib/sqlalchemy/sql/dml.py +++ b/lib/sqlalchemy/sql/dml.py @@ -511,8 +511,8 @@ def with_hint(self, text, selectable=None, dialect_name="*"): @property def entity_description(self): - """Return a :term:`plugin-enabled` description of the table and/or entity - which this DML construct is operating against. + """Return a :term:`plugin-enabled` description of the table and/or + entity which this DML construct is operating against. 
This attribute is generally useful when using the ORM, as an extended structure which includes information about mapped diff --git a/lib/sqlalchemy/sql/functions.py b/lib/sqlalchemy/sql/functions.py index 963108d7c4c..29f41223d35 100644 --- a/lib/sqlalchemy/sql/functions.py +++ b/lib/sqlalchemy/sql/functions.py @@ -418,7 +418,8 @@ def filter(self, *criterion): return FunctionFilter(self, *criterion) def as_comparison(self, left_index, right_index): - """Interpret this expression as a boolean comparison between two values. + """Interpret this expression as a boolean comparison between two + values. This method is used for an ORM use case described at :ref:`relationship_custom_operator_sql_function`. diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index 829f26030c4..8379e1ca735 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -1834,8 +1834,8 @@ def alias(self, name=None): return tva def lateral(self, name=None): - """Return a new :class:`_sql.TableValuedAlias` with the lateral flag set, - so that it renders as LATERAL. + """Return a new :class:`_sql.TableValuedAlias` with the lateral flag + set, so that it renders as LATERAL. .. seealso:: @@ -5485,10 +5485,9 @@ def join(self, target, onclause=None, isouter=False, full=False): ) def outerjoin_from(self, from_, target, onclause=None, full=False): - r"""Create a SQL LEFT OUTER JOIN against this :class:`_expression.Select` - object's criterion - and apply generatively, returning the newly resulting - :class:`_expression.Select`. + r"""Create a SQL LEFT OUTER JOIN against this + :class:`_expression.Select` object's criterion and apply generatively, + returning the newly resulting :class:`_expression.Select`. Usage is the same as that of :meth:`_selectable.Select.join_from`. 
@@ -6768,7 +6767,8 @@ def select(self, whereclause=None, **kwargs): return Select._create_select_from_fromclause(self, [self], **kwargs) def correlate(self, *fromclause): - """Apply correlation to the subquery noted by this :class:`_sql.Exists`. + """Apply correlation to the subquery noted by this + :class:`_sql.Exists`. .. seealso:: @@ -6782,7 +6782,8 @@ def correlate(self, *fromclause): return e def correlate_except(self, *fromclause): - """Apply correlation to the subquery noted by this :class:`_sql.Exists`. + """Apply correlation to the subquery noted by this + :class:`_sql.Exists`. .. seealso:: diff --git a/lib/sqlalchemy/sql/type_api.py b/lib/sqlalchemy/sql/type_api.py index 172ce0d884e..29dc74971c8 100644 --- a/lib/sqlalchemy/sql/type_api.py +++ b/lib/sqlalchemy/sql/type_api.py @@ -179,8 +179,8 @@ def __reduce__(self): """ def evaluates_none(self): - """Return a copy of this type which has the :attr:`.should_evaluate_none` - flag set to True. + """Return a copy of this type which has the + :attr:`.should_evaluate_none` flag set to True. E.g.:: @@ -1105,7 +1105,8 @@ class Emulated(object): """ def adapt_to_emulated(self, impltype, **kw): - """Given an impl class, adapt this type to the impl assuming "emulated". + """Given an impl class, adapt this type to the impl assuming + "emulated". The impl should also be an "emulated" version of this type, most likely the same class as this type itself. @@ -1152,7 +1153,8 @@ def adapt_native_to_emulated(cls, impl, **kw): @classmethod def adapt_emulated_to_native(cls, impl, **kw): - """Given an impl, adapt this type's class to the impl assuming "native". + """Given an impl, adapt this type's class to the impl assuming + "native". The impl will be an :class:`.Emulated` class but not a :class:`.NativeForEmulated`. 
diff --git a/lib/sqlalchemy/testing/requirements.py b/lib/sqlalchemy/testing/requirements.py index b3f7ddb502e..857d1fdef1e 100644 --- a/lib/sqlalchemy/testing/requirements.py +++ b/lib/sqlalchemy/testing/requirements.py @@ -228,9 +228,8 @@ def nullsordering(self): @property def standalone_binds(self): - """target database/driver supports bound parameters as column expressions - without being in the context of a typed column. - + """target database/driver supports bound parameters as column + expressions without being in the context of a typed column. """ return exclusions.closed() diff --git a/test/orm/inheritance/test_basic.py b/test/orm/inheritance/test_basic.py index 600726ce38e..6285a80a7f3 100644 --- a/test/orm/inheritance/test_basic.py +++ b/test/orm/inheritance/test_basic.py @@ -2062,8 +2062,8 @@ class Sub(Base): class DistinctPKTest(fixtures.MappedTest): - """test the construction of mapper.primary_key when an inheriting relationship - joins on a column other than primary key column.""" + """test the construction of mapper.primary_key when an inheriting + relationship joins on a column other than primary key column.""" run_inserts = "once" run_deletes = None diff --git a/test/orm/inheritance/test_relationship.py b/test/orm/inheritance/test_relationship.py index d12cb1999a0..0b1967f5191 100644 --- a/test/orm/inheritance/test_relationship.py +++ b/test/orm/inheritance/test_relationship.py @@ -2512,9 +2512,9 @@ def test_local_wpoly_innerjoins_roundtrip(self): class JoinAcrossJoinedInhMultiPath( fixtures.DeclarativeMappedTest, testing.AssertsCompiledSQL ): - """test long join paths with a joined-inh in the middle, where we go multiple - times across the same joined-inh to the same target but with other classes - in the middle. E.g. test [ticket:2908] + """test long join paths with a joined-inh in the middle, where we go + multiple times across the same joined-inh to the same target but with + other classes in the middle. E.g. 
test [ticket:2908] """ run_setup_mappers = "once" diff --git a/test/orm/test_assorted_eager.py b/test/orm/test_assorted_eager.py index f6d10d8e652..86921ff9fc4 100644 --- a/test/orm/test_assorted_eager.py +++ b/test/orm/test_assorted_eager.py @@ -415,7 +415,8 @@ def setup_mappers(cls): ), def test_eager_terminate(self): - """Eager query generation does not include the same mapper's table twice. + """Eager query generation does not include the same mapper's table + twice. Or, that bi-directional eager loads don't include each other in eager query generation. diff --git a/test/orm/test_deprecations.py b/test/orm/test_deprecations.py index 05bfdf26dab..8febf3b3fcf 100644 --- a/test/orm/test_deprecations.py +++ b/test/orm/test_deprecations.py @@ -1883,8 +1883,8 @@ def test_clause_onclause(self): ) def test_from_self_resets_joinpaths(self): - """test a join from from_self() doesn't confuse joins inside the subquery - with the outside. + """test a join from from_self() doesn't confuse joins inside the + subquery with the outside. """ Item, Keyword = self.classes.Item, self.classes.Keyword diff --git a/test/orm/test_manytomany.py b/test/orm/test_manytomany.py index 1abf5551a5e..1155096a446 100644 --- a/test/orm/test_manytomany.py +++ b/test/orm/test_manytomany.py @@ -226,8 +226,9 @@ def test_self_referential_bidirectional_mutation(self): assert p2 in p1.parent_places def test_joinedload_on_double(self): - """test that a mapper can have two eager relationships to the same table, via - two different association tables. aliases are required.""" + """test that a mapper can have two eager relationships to the same + table, via two different association tables. aliases are required. 
+ """ ( place_input, diff --git a/test/requirements.py b/test/requirements.py index e47099013ee..68e5f8bfe26 100644 --- a/test/requirements.py +++ b/test/requirements.py @@ -198,9 +198,8 @@ def non_native_boolean_unconstrained(self): @property def standalone_binds(self): - """target database/driver supports bound parameters as column expressions - without being in the context of a typed column. - + """target database/driver supports bound parameters as column + expressions without being in the context of a typed column. """ return skip_if(["firebird", "mssql+mxodbc"], "not supported by driver") From 011e5f87138a29c2b4555bf494cee16c804e1e45 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 1 Aug 2022 10:29:13 -0400 Subject: [PATCH 322/632] repair psycopg2 (and psycopg) multiple hosts format Fixed issue in psycopg2 dialect where the "multiple hosts" feature implemented for :ticket:`4392`, where multiple ``host:port`` pairs could be passed in the query string as ``?host=host1:port1&host=host2:port2&host=host3:port3`` was not implemented correctly, as it did not propagate the "port" parameter appropriately. Connections that didn't use a different "port" likely worked without issue, and connections that had "port" for some of the entries may have incorrectly passed on that hostname. The format is now corrected to pass hosts/ports appropriately. As part of this change, maintained support for another multihost style that worked unintentionally, which is comma-separated ``?host=h1,h2,h3&port=p1,p2,p3``. This format is more consistent with libpq's query-string format, whereas the previous format is inspired by a different aspect of libpq's URI format but is not quite the same thing. If the two styles are mixed together, an error is raised as this is ambiguous. 
Fixes: #4392 Change-Id: Ic9cc0b0e6e90725e158d9efe73e042853dd1263f (cherry picked from commit 93e6f4f05ba885b16accf0ad811160dd7d0eec70) --- doc/build/changelog/unreleased_14/4392.rst | 22 ++++ .../dialects/postgresql/psycopg2.py | 68 +++++++--- test/dialect/postgresql/test_dialect.py | 122 ++++++++++++++++-- 3 files changed, 184 insertions(+), 28 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/4392.rst diff --git a/doc/build/changelog/unreleased_14/4392.rst b/doc/build/changelog/unreleased_14/4392.rst new file mode 100644 index 00000000000..9b83b09cc58 --- /dev/null +++ b/doc/build/changelog/unreleased_14/4392.rst @@ -0,0 +1,22 @@ +.. change:: + :tags: bug, postgresql + :tickets: 4392 + + Fixed issue in psycopg2 dialect where the "multiple hosts" feature + implemented for :ticket:`4392`, where multiple ``host:port`` pairs could be + passed in the query string as + ``?host=host1:port1&host=host2:port2&host=host3:port3`` was not implemented + correctly, as it did not propagate the "port" parameter appropriately. + Connections that didn't use a different "port" likely worked without issue, + and connections that had "port" for some of the entries may have + incorrectly passed on that hostname. The format is now corrected to pass + hosts/ports appropriately. + + As part of this change, maintained support for another multihost style that + worked unintentionally, which is comma-separated + ``?host=h1,h2,h3&port=p1,p2,p3``. This format is more consistent with + libpq's query-string format, whereas the previous format is inspired by a + different aspect of libpq's URI format but is not quite the same thing. + + If the two styles are mixed together, an error is raised as this is + ambiguous. 
\ No newline at end of file diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py index bacd60bbeff..67474271e8e 100644 --- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py +++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py @@ -120,22 +120,51 @@ psycopg2 supports multiple connection points in the connection string. When the ``host`` parameter is used multiple times in the query section of the URL, SQLAlchemy will create a single string of the host and port -information provided to make the connections:: +information provided to make the connections. Tokens may consist of +``host::port`` or just ``host``; in the latter case, the default port +is selected by libpq. In the example below, three host connections +are specified, for ``HostA::PortA``, ``HostB`` connecting to the default port, +and ``HostC::PortC``:: create_engine( - "postgresql+psycopg2://user:password@/dbname?host=HostA:port1&host=HostB&host=HostC" + "postgresql+psycopg2://user:password@/dbname?host=HostA:PortA&host=HostB&host=HostC:PortC" ) -A connection to each host is then attempted until either a connection is successful -or all connections are unsuccessful in which case an error is raised. +As an alternative, libpq query string format also may be used; this specifies +``host`` and ``port`` as single query string arguments with comma-separated +lists - the default port can be chosen by indicating an empty value +in the comma separated list:: + + create_engine( + "postgresql+psycopg2://user:password@/dbname?host=HostA,HostB,HostC&port=PortA,,PortC" + ) + +With either URL style, connections to each host is attempted based on a +configurable strategy, which may be configured using the libpq +``target_session_attrs`` parameter. Per libpq this defaults to ``any`` +which indicates a connection to each host is then attempted until a connection is successful. +Other strategies include ``primary``, ``prefer-standby``, etc. 
The complete +list is documented by PostgreSQL at +`libpq connection strings `_. + +For example, to indicate two hosts using the ``primary`` strategy:: + + create_engine( + "postgresql+psycopg2://user:password@/dbname?host=HostA:PortA&host=HostB&host=HostC:PortC&target_session_attrs=primary" + ) + +.. versionchanged:: 1.4.40 Port specification in psycopg2 multiple host format + is repaired, previously ports were not correctly interpreted in this context. + libpq comma-separated format is also now supported. .. versionadded:: 1.3.20 Support for multiple hosts in PostgreSQL connection string. .. seealso:: - `PQConnString \ - `_ + `libpq connection strings `_ - please refer + to this section in the libpq documentation for complete background on multiple host support. + Empty DSN Connections / Environment Variable Connections --------------------------------------------------------- @@ -988,20 +1017,27 @@ def create_connect_args(self, url): if "host" in url.query: is_multihost = isinstance(url.query["host"], (list, tuple)) - if opts: + if opts or url.query: + if not opts: + opts = {} if "port" in opts: opts["port"] = int(opts["port"]) opts.update(url.query) if is_multihost: - opts["host"] = ",".join(url.query["host"]) - # send individual dbname, user, password, host, port - # parameters to psycopg2.connect() - return ([], opts) - elif url.query: - # any other connection arguments, pass directly - opts.update(url.query) - if is_multihost: - opts["host"] = ",".join(url.query["host"]) + hosts, ports = zip( + *[ + token.split(":") if ":" in token else (token, "") + for token in url.query["host"] + ] + ) + opts["host"] = ",".join(hosts) + if "port" in opts: + raise exc.ArgumentError( + "Can't mix 'multihost' formats together; use " + '"host=h1,h2,h3&port=p1,p2,p3" or ' + '"host=h1:p1&host=h2:p2&host=h3:p3" separately' + ) + opts["port"] = ",".join(ports) return ([], opts) else: # no connection arguments whatsoever; psycopg2.connect() diff --git 
a/test/dialect/postgresql/test_dialect.py b/test/dialect/postgresql/test_dialect.py index 8aa90364956..fa470a18ce1 100644 --- a/test/dialect/postgresql/test_dialect.py +++ b/test/dialect/postgresql/test_dialect.py @@ -8,6 +8,7 @@ from sqlalchemy import bindparam from sqlalchemy import cast from sqlalchemy import Column +from sqlalchemy import create_engine from sqlalchemy import DateTime from sqlalchemy import DDL from sqlalchemy import event @@ -41,6 +42,7 @@ from sqlalchemy.sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL from sqlalchemy.testing import config from sqlalchemy.testing import engines +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import is_false @@ -237,24 +239,85 @@ def test_psycopg2_nonempty_connection_string_w_query(self): eq_(cargs, []) eq_(cparams, {"host": "somehost", "any_random_thing": "yes"}) - def test_psycopg2_nonempty_connection_string_w_query_two(self): + @testing.combinations( + ( + "postgresql+psycopg2://USER:PASS@/DB?host=hostA", + { + "database": "DB", + "user": "USER", + "password": "PASS", + "host": "hostA", + }, + ), + ( + "postgresql+psycopg2://USER:PASS@/DB" + "?host=hostA&host=hostB&host=hostC", + { + "database": "DB", + "user": "USER", + "password": "PASS", + "host": "hostA,hostB,hostC", + "port": ",,", + }, + ), + ( + "postgresql+psycopg2://USER:PASS@/DB" + "?host=hostA&host=hostB:portB&host=hostC:portC", + { + "database": "DB", + "user": "USER", + "password": "PASS", + "host": "hostA,hostB,hostC", + "port": ",portB,portC", + }, + ), + ( + "postgresql+psycopg2://USER:PASS@/DB?" 
+ "host=hostA:portA&host=hostB:portB&host=hostC:portC", + { + "database": "DB", + "user": "USER", + "password": "PASS", + "host": "hostA,hostB,hostC", + "port": "portA,portB,portC", + }, + ), + ( + "postgresql+psycopg2:///" + "?host=hostA:portA&host=hostB:portB&host=hostC:portC", + {"host": "hostA,hostB,hostC", "port": "portA,portB,portC"}, + ), + ( + "postgresql+psycopg2:///" + "?host=hostA:portA&host=hostB:portB&host=hostC:portC", + {"host": "hostA,hostB,hostC", "port": "portA,portB,portC"}, + ), + ( + "postgresql+psycopg2:///" + "?host=hostA,hostB,hostC&port=portA,portB,portC", + {"host": "hostA,hostB,hostC", "port": "portA,portB,portC"}, + ), + argnames="url_string,expected", + ) + def test_psycopg_multi_hosts(self, url_string, expected): dialect = psycopg2_dialect.dialect() - url_string = "postgresql://USER:PASS@/DB?host=hostA" u = url.make_url(url_string) cargs, cparams = dialect.create_connect_args(u) eq_(cargs, []) - eq_(cparams["host"], "hostA") + eq_(cparams, expected) - def test_psycopg2_nonempty_connection_string_w_query_three(self): + @testing.combinations( + "postgresql+psycopg2:///?host=H&host=H&port=5432,5432", + "postgresql+psycopg2://user:pass@/dbname?host=H&host=H&port=5432,5432", + argnames="url_string", + ) + def test_psycopg_no_mix_hosts(self, url_string): dialect = psycopg2_dialect.dialect() - url_string = ( - "postgresql://USER:PASS@/DB" - "?host=hostA:portA&host=hostB&host=hostC" - ) - u = url.make_url(url_string) - cargs, cparams = dialect.create_connect_args(u) - eq_(cargs, []) - eq_(cparams["host"], "hostA:portA,hostB,hostC") + with expect_raises_message( + exc.ArgumentError, "Can't mix 'multihost' formats together" + ): + u = url.make_url(url_string) + dialect.create_connect_args(u) def test_psycopg2_disconnect(self): class Error(Exception): @@ -293,6 +356,41 @@ class Error(Exception): eq_(dialect.is_disconnect("not an error", None, None), False) +class BackendDialectTest(fixtures.TestBase): + __backend__ = True + + 
@testing.only_on(["+psycopg", "+psycopg2"]) + @testing.combinations( + "host=H:P&host=H:P&host=H:P", + "host=H:P&host=H&host=H", + "host=H:P&host=H&host=H:P", + "host=H&host=H:P&host=H", + "host=H,H,H&port=P,P,P", + ) + def test_connect_psycopg_multiple_hosts(self, pattern): + """test the fix for #4392""" + + tdb_url = testing.db.url + + host = tdb_url.host + if host == "127.0.0.1": + host = "localhost" + port = str(tdb_url.port) if tdb_url.port else "5432" + + query_str = pattern.replace("H", host).replace("P", port) + url_string = "%s://%s:" "%s@/%s?%s" % ( + tdb_url.drivername, + tdb_url.username, + tdb_url.password, + tdb_url.database, + query_str, + ) + + e = create_engine(url_string) + with e.connect() as conn: + eq_(conn.exec_driver_sql("select 1").scalar(), 1) + + class PGCodeTest(fixtures.TestBase): __only_on__ = "postgresql" From fd4fd153ca1b37e30b84db4ec476d12964673003 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 2 Aug 2022 09:33:51 -0400 Subject: [PATCH 323/632] document @ sign in issue template, docs Fixes: #8328 Change-Id: I69a48c4499fe7e57aad242403186e69c4452b84b (cherry picked from commit 7f45bcd114c797105921e06789b3753e7d8f6daa) --- doc/build/core/engines.rst | 54 +++++++++++++++++++++++++------------- 1 file changed, 36 insertions(+), 18 deletions(-) diff --git a/doc/build/core/engines.rst b/doc/build/core/engines.rst index 1f60ae6253f..7aa49f29d37 100644 --- a/doc/build/core/engines.rst +++ b/doc/build/core/engines.rst @@ -60,7 +60,9 @@ on a URL. These URLs follow `RFC-1738 `_, and usually can include username, password, hostname, database name as well as optional keyword arguments for additional configuration. In some cases a file path is accepted, and in others a "data source name" replaces -the "host" and "database" portions. The typical form of a database URL is:: +the "host" and "database" portions. The typical form of a database URL is: + +.. 
sourcecode:: plain dialect+driver://username:password@host:port/database @@ -71,31 +73,47 @@ the database using all lowercase letters. If not specified, a "default" DBAPI will be imported if available - this default is typically the most widely known driver available for that backend. +Escaping Special Characters such as @ signs in Passwords +---------------------------------------------------------- + As the URL is like any other URL, **special characters such as those that may -be used in the password need to be URL encoded to be parsed correctly.**. Below -is an example of a URL that includes the password ``"kx%jj5/g"``, where the -percent sign and slash characters are represented as ``%25`` and ``%2F``, -respectively:: +be used in the user and password need to be URL encoded to be parsed correctly.**. +**This includes the @ sign**. + +Below is an example of a URL that includes the password ``"kx@jj5/g"``, where the +"at" sign and slash characters are represented as ``%40`` and ``%2F``, +respectively: + +.. sourcecode:: plain - postgresql+pg8000://dbuser:kx%25jj5%2Fg@pghost10/appdb + postgresql+pg8000://dbuser:kx%40jj5%2Fg@pghost10/appdb The encoding for the above password can be generated using `urllib.parse `_:: >>> import urllib.parse - >>> urllib.parse.quote_plus("kx%jj5/g") - 'kx%25jj5%2Fg' + >>> urllib.parse.quote_plus("kx@jj5/g") + 'kx%40jj5%2Fg' + +.. versionchanged:: 1.4 + + Support for ``@`` signs in hostnames and database names has been + fixed. As a side effect of this fix, ``@`` signs in passwords must be + escaped. + +Backend-Specific URLs +---------------------- Examples for common connection styles follow below. For a full index of detailed information on all included dialects as well as links to third-party dialects, see :ref:`dialect_toplevel`. PostgreSQL ----------- +^^^^^^^^^^ -The PostgreSQL dialect uses psycopg2 as the default DBAPI. 
pg8000 is -also available as a pure-Python substitute:: +The PostgreSQL dialect uses psycopg2 as the default DBAPI. Other +PostgreSQL DBAPIs include pg8000 and asyncpg:: # default engine = create_engine('postgresql://scott:tiger@localhost/mydatabase') @@ -109,10 +127,10 @@ also available as a pure-Python substitute:: More notes on connecting to PostgreSQL at :ref:`postgresql_toplevel`. MySQL ------ +^^^^^^^^^^ -The MySQL dialect uses mysql-python as the default DBAPI. There are many -MySQL DBAPIs available, including MySQL-connector-python and OurSQL:: +The MySQL dialect uses mysqlclient as the default DBAPI. There are other +MySQL DBAPIs available, including PyMySQL:: # default engine = create_engine('mysql://scott:tiger@localhost/foo') @@ -126,7 +144,7 @@ MySQL DBAPIs available, including MySQL-connector-python and OurSQL:: More notes on connecting to MySQL at :ref:`mysql_toplevel`. Oracle ------- +^^^^^^^^^^ The Oracle dialect uses cx_oracle as the default DBAPI:: @@ -137,7 +155,7 @@ The Oracle dialect uses cx_oracle as the default DBAPI:: More notes on connecting to Oracle at :ref:`oracle_toplevel`. Microsoft SQL Server --------------------- +^^^^^^^^^^^^^^^^^^^^ The SQL Server dialect uses pyodbc as the default DBAPI. pymssql is also available:: @@ -151,7 +169,7 @@ also available:: More notes on connecting to SQL Server at :ref:`mssql_toplevel`. SQLite ------- +^^^^^^^ SQLite connects to file-based databases, using the Python built-in module ``sqlite3`` by default. @@ -182,7 +200,7 @@ To use a SQLite ``:memory:`` database, specify an empty URL:: More notes on connecting to SQLite at :ref:`sqlite_toplevel`. Others ------- +^^^^^^ See :ref:`dialect_toplevel`, the top-level page for all additional dialect documentation. 
From 3e7d2cb147e16a6c46355dc784cc513ca17257e8 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 2 Aug 2022 11:34:35 -0400 Subject: [PATCH 324/632] fix up SAVEPOINT docs these contained a factual error that the entire session is expired, which is no longer the case (I can't find exactly when this was changed). Additionally, added a PostgreSQL specific example w/ IntegrityError as this is the most common case for this. Tried to tighten up other language and make it as clear as possible. Change-Id: I39160e7443964db59d1d5a2e0616084767813eea (cherry picked from commit 37b8c5e755cefd9ae0fdf6816dae09b519be3b06) --- doc/build/orm/session_transaction.rst | 80 ++++++++++++++++++++------- lib/sqlalchemy/engine/base.py | 2 +- 2 files changed, 61 insertions(+), 21 deletions(-) diff --git a/doc/build/orm/session_transaction.rst b/doc/build/orm/session_transaction.rst index ce370f2f0eb..c7df69f4292 100644 --- a/doc/build/orm/session_transaction.rst +++ b/doc/build/orm/session_transaction.rst @@ -146,36 +146,76 @@ method:: # commits u1 and u2 Each time :meth:`_orm.Session.begin_nested` is called, a new "BEGIN SAVEPOINT" -command is emitted to the database with a unique identifier. When -:meth:`_orm.SessionTransaction.commit` is called, "RELEASE SAVEPOINT" -is emitted on the database, and if instead -:meth:`_orm.SessionTransaction.rollback` is called, "ROLLBACK TO SAVEPOINT" -is emitted. - -:meth:`_orm.Session.begin_nested` may also be used as a context manager in the -same manner as that of the :meth:`_orm.Session.begin` method:: +command is emitted to the database within the scope of the current +database transaction (starting one if not already in progress), and +an object of type :class:`_orm.SessionTransaction` is returned, which +represents a handle to this SAVEPOINT. When +the ``.commit()`` method on this object is called, "RELEASE SAVEPOINT" +is emitted to the database, and if instead the ``.rollback()`` +method is called, "ROLLBACK TO SAVEPOINT" is emitted. 
The enclosing +database transaction remains in progress. + +:meth:`_orm.Session.begin_nested` is typically used as a context manager +where specific per-instance errors may be caught, in conjunction with +a rollback emitted for that portion of the transaction's state, without +rolling back the whole transaction, as in the example below:: for record in records: try: with session.begin_nested(): - session.merge(record) + session.merge(record) except: - print("Skipped record %s" % record) + print("Skipped record %s" % record) session.commit() -When :meth:`~.Session.begin_nested` is called, a -:meth:`~.Session.flush` is unconditionally issued -(regardless of the ``autoflush`` setting). This is so that when a -rollback on this nested transaction occurs, the full state of the -session is expired, thus causing all subsequent attribute/instance access to -reference the full state of the :class:`~sqlalchemy.orm.session.Session` right -before :meth:`~.Session.begin_nested` was called. +When the context manager yielded by :meth:`_orm.Session.begin_nested` +completes, it "commits" the savepoint, +which includes the usual behavior of flushing all pending state. When +an error is raised, the savepoint is rolled back and the state of the +:class:`_orm.Session` local to the objects that were changed is expired. + +This pattern is ideal for situations such as using PostgreSQL and +catching :class:`.IntegrityError` to detect duplicate rows; PostgreSQL normally +aborts the entire tranasction when such an error is raised, however when using +SAVEPOINT, the outer transaction is maintained. 
In the example below +a list of data is persisted into the database, with the occasional +"duplicate primary key" record skipped, without rolling back the entire +operation:: + + from sqlalchemy import exc + + with session.begin(): + for record in records: + try: + with session.begin_nested(): + obj = SomeRecord(id=record["identifier"], name=record["name"]) + session.add(obj) + except exc.IntegrityError: + print(f"Skipped record {record} - row already exists") + +When :meth:`~.Session.begin_nested` is called, the :class:`_orm.Session` first +flushes all currently pending state to the database; this occurs unconditionally, +regardless of the value of the :paramref:`_orm.Session.autoflush` parameter +which normally may be used to disable automatic flush. The rationale +for this behavior is so that +when a rollback on this nested transaction occurs, the :class:`_orm.Session` +may expire any in-memory state that was created within the scope of the +SAVEPOINT, while +ensuring that when those expired objects are refreshed, the state of the +object graph prior to the beginning of the SAVEPOINT will be available +to re-load from the database. + +In modern versions of SQLAlchemy, when a SAVEPOINT initiated by +:meth:`_orm.Session.begin_nested` is rolled back, in-memory object state that +was modified since the SAVEPOINT was created +is expired, however other object state that was not altered since the SAVEPOINT +began is maintained. This is so that subsequent operations can continue to make use of the +otherwise unaffected data +without the need for refreshing it from the database. .. seealso:: - :class:`_engine.NestedTransaction` - the :class:`.NestedTransaction` class is the - Core-level construct that is used by the :class:`_orm.Session` internally - to produce SAVEPOINT blocks. + :meth:`_engine.Connection.begin_nested` - Core SAVEPOINT API .. 
_orm_session_vs_engine: diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index 8a8cab140b0..f126eb0c56e 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -897,7 +897,7 @@ def begin_nested(self): :meth:`_engine.Connection.begin` - :meth:`_engine.Connection.begin_twophase` + :ref:`session_begin_nested` - ORM support for SAVEPOINT """ if self._is_future: From dd9be7039496a9ad7f8a8e812644110383e46ec2 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 2 Aug 2022 11:45:09 -0400 Subject: [PATCH 325/632] fix mypy test this change seems to be from 2.0 in 4fe222d9412df30fc15ace3d7a however does not apply to 1.4. Change-Id: Ie736afaf18abf048f4bf5f5266e76aefa98e4e80 --- test/ext/mypy/files/typing_err3.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/ext/mypy/files/typing_err3.py b/test/ext/mypy/files/typing_err3.py index 3fd1f86ff60..a81ea067c79 100644 --- a/test/ext/mypy/files/typing_err3.py +++ b/test/ext/mypy/files/typing_err3.py @@ -22,6 +22,8 @@ class User(Base): id = Column(Integer, primary_key=True) + # note this goes away in 2.0 for the moment + # EXPECTED_MYPY: Unexpected keyword argument "wrong_arg" for "RelationshipProperty" # noqa addresses: Mapped[List["Address"]] = relationship( "Address", wrong_arg="imwrong" ) From 8b004a87a175941dffa1847cc8d4e0e7bf272d48 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 2 Aug 2022 14:51:49 -0400 Subject: [PATCH 326/632] reword yield_per a bit more I'm still not satisified with this section as it is still too wordy and dense, but at least let's put a better description of what yield_per actually is and why one might use it at the top. 
Change-Id: I10f4d862d9c499044f5718fca0d27ac106289717 (cherry picked from commit 3ef9fa6d4ff8ade8915000b41c262caf4a88e064) --- doc/build/orm/queryguide.rst | 45 ++++++++++++++++++++++++++++-------- 1 file changed, 35 insertions(+), 10 deletions(-) diff --git a/doc/build/orm/queryguide.rst b/doc/build/orm/queryguide.rst index 9bc585d3997..2a575354c60 100644 --- a/doc/build/orm/queryguide.rst +++ b/doc/build/orm/queryguide.rst @@ -998,12 +998,43 @@ The ``autoflush`` execution option is equvialent to the .. _orm_queryguide_yield_per: -Yield Per -^^^^^^^^^ +Fetching Large Result Sets with Yield Per +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The ``yield_per`` execution option is an integer value which will cause the -:class:`_engine.Result` to yield only a fixed count of rows at a time. -When used as an execution option, ``yield_per`` is equivalent to making use +:class:`_engine.Result` to buffer only limited number of rows and/or ORM +objects at a time, before making data available to the client. + +Normally, the ORM will construct ORM objects for **all** rows up front, +assembling them into a single buffer, before passing this buffer to +the :class:`_engine.Result` object as a source of rows to be returned. +The rationale for this behavior is to allow correct behavior +for features such as joined eager loading, uniquifying of results, and the +general case of result handling logic that relies upon the identity map +maintaining a consistent state for every object in a result set as it is +fetched. + +The purpose of the ``yield_per`` option is to change this behavior so that the +ORM result set is optimized for iteration through very large result sets (> 10K +rows), where the user has determined that the above patterns don't apply. 
When +``yield_per`` is used, the ORM will instead batch ORM results into +sub-collections and yield rows from each sub-collection individually as the +:class:`_engine.Result` object is iterated, so that the Python interpreter +doesn't need to declare very large areas of memory which is both time consuming +and leads to excessive memory use. The option affects both the way the database +cursor is used as well as how the ORM constructs rows and objects to be +passed to the :class:`_engine.Result`. + +.. tip:: + + From the above, it follows that the :class:`_engine.Result` must be + consumed in an iterable fashion, that is, using iteration such as + ``for row in result`` or using partial row methods such as + :meth:`_engine.Result.fetchmany` or :meth:`_engine.Result.partitions`. + Calling :meth:`_engine.Result.all` will defeat the purpose of using + ``yield_per``. + +Using ``yield_per`` is equivalent to making use of both the :paramref:`_engine.Connection.execution_options.stream_results` execution option, which selects for server side cursors to be used by the backend if supported, and the :meth:`_engine.Result.yield_per` method @@ -1066,12 +1097,6 @@ partitions. The size of each partition defaults to the integer value passed to (User(id=1, name='spongebob', fullname='Spongebob Squarepants'),) ... -The purpose of "yield per" is when fetching very large result sets -(> 10K rows), to batch results in sub-collections and yield them -out partially, so that the Python interpreter doesn't need to declare -very large areas of memory which is both time consuming and leads -to excessive memory use. 
- When ``yield_per`` is used, the :paramref:`_engine.Connection.execution_options.stream_results` option is also set for the Core execution, so that a streaming / server side cursor will be From d62ebdefbb49d0f2fb6dce3f957a2254d894bdb7 Mon Sep 17 00:00:00 2001 From: Gord Thompson Date: Fri, 22 Jul 2022 08:31:24 -0400 Subject: [PATCH 327/632] Fix 'No transaction found' error on Synapse. Fixed issue where the SQL Server dialect's query for the current isolation level would fail on Azure Synapse Analytics, due to the way in which this database handles transaction rollbacks after an error has occurred. The initial query has been modified to no longer rely upon catching an error when attempting to detect the appropriate system view. Additionally, to better support this database's very specific "rollback" behavior, implemented new parameter ``ignore_no_transaction_on_rollback`` indicating that a rollback should ignore Azure Synapse error 'No corresponding transaction found. (111214)', which is raised if no transaction is present in conflict with the Python DBAPI. Fixes: #8231 Closes: #8233 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/8233 Pull-request-sha: c48bd44a9f53d00e5e94f1b8bf996711b6419562 Change-Id: I6407a03148f45cc9eba8fe1d31d4f59ebf9c7ef7 (cherry picked from commit 8fe3cd69c5f2d8f73e75fb19ae929273282fba57) --- doc/build/changelog/unreleased_14/8231.rst | 20 ++++++ lib/sqlalchemy/dialects/mssql/base.py | 78 +++++++++++++--------- lib/sqlalchemy/dialects/mssql/pyodbc.py | 28 ++++++++ test/dialect/mssql/test_engine.py | 74 +++++++++++++++++--- 4 files changed, 157 insertions(+), 43 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8231.rst diff --git a/doc/build/changelog/unreleased_14/8231.rst b/doc/build/changelog/unreleased_14/8231.rst new file mode 100644 index 00000000000..401ab717e5d --- /dev/null +++ b/doc/build/changelog/unreleased_14/8231.rst @@ -0,0 +1,20 @@ +.. 
change:: + :tags: bug, mssql + :tickets: 8231 + + Fixed issue where the SQL Server dialect's query for the current isolation + level would fail on Azure Synapse Analytics, due to the way in which this + database handles transaction rollbacks after an error has occurred. The + initial query has been modified to no longer rely upon catching an error + when attempting to detect the appropriate system view. Additionally, to + better support this database's very specific "rollback" behavior, + implemented new parameter ``ignore_no_transaction_on_rollback`` indicating + that a rollback should ignore Azure Synapse error 'No corresponding + transaction found. (111214)', which is raised if no transaction is present + in conflict with the Python DBAPI. + + Initial patch and valuable debugging assistance courtesy of @ww2406. + + .. seealso:: + + :ref:`azure_synapse_ignore_no_transaction_on_rollback` diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index 3c22b9b7c96..2d1d0f7008e 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -2752,6 +2752,7 @@ def __init__( json_serializer=None, json_deserializer=None, legacy_schema_aliasing=None, + ignore_no_transaction_on_rollback=False, **opts ): self.query_timeout = int(query_timeout or 0) @@ -2759,6 +2760,9 @@ def __init__( self.use_scope_identity = use_scope_identity self.deprecate_large_types = deprecate_large_types + self.ignore_no_transaction_on_rollback = ( + ignore_no_transaction_on_rollback + ) if legacy_schema_aliasing is not None: util.warn_deprecated( @@ -2783,6 +2787,22 @@ def do_release_savepoint(self, connection, name): # SQL Server does not support RELEASE SAVEPOINT pass + def do_rollback(self, dbapi_connection): + try: + super(MSDialect, self).do_rollback(dbapi_connection) + except self.dbapi.ProgrammingError as e: + if self.ignore_no_transaction_on_rollback and re.match( + r".*\b111214\b", str(e) + ): + util.warn( + 
"ProgrammingError 111214 " + "'No corresponding transaction found.' " + "has been suppressed via " + "ignore_no_transaction_on_rollback=True" + ) + else: + raise + _isolation_lookup = set( [ "SERIALIZABLE", @@ -2807,48 +2827,42 @@ def set_isolation_level(self, connection, level): if level == "SNAPSHOT": connection.commit() - def get_isolation_level(self, connection): - last_error = None + def get_isolation_level(self, dbapi_connection): + cursor = dbapi_connection.cursor() + try: + cursor.execute( + "SELECT name FROM sys.system_views WHERE name IN " + "('dm_exec_sessions', 'dm_pdw_nodes_exec_sessions')" + ) + row = cursor.fetchone() + if not row: + raise NotImplementedError( + "Can't fetch isolation level on this particular " + "SQL Server version." + ) - views = ("sys.dm_exec_sessions", "sys.dm_pdw_nodes_exec_sessions") - for view in views: - cursor = connection.cursor() - try: - cursor.execute( - """ - SELECT CASE transaction_isolation_level + view_name = "sys.{}".format(row[0]) + cursor.execute( + """ + SELECT CASE transaction_isolation_level WHEN 0 THEN NULL WHEN 1 THEN 'READ UNCOMMITTED' WHEN 2 THEN 'READ COMMITTED' WHEN 3 THEN 'REPEATABLE READ' WHEN 4 THEN 'SERIALIZABLE' WHEN 5 THEN 'SNAPSHOT' END AS TRANSACTION_ISOLATION_LEVEL - FROM %s + FROM {} where session_id = @@SPID - """ - % view + """.format( + view_name ) - val = cursor.fetchone()[0] - except self.dbapi.Error as err: - # Python3 scoping rules - last_error = err - continue - else: - return val.upper() - finally: - cursor.close() - else: - # note that the NotImplementedError is caught by - # DefaultDialect, so the warning here is all that displays - util.warn( - "Could not fetch transaction isolation level, " - "tried views: %s; final error was: %s" % (views, last_error) - ) - raise NotImplementedError( - "Can't fetch isolation level on this particular " - "SQL Server version. 
tried views: %s; final error was: %s" - % (views, last_error) ) + row = cursor.fetchone() + assert row is not None + val = row[0] + finally: + cursor.close() + return val.upper() def initialize(self, connection): super(MSDialect, self).initialize(connection) diff --git a/lib/sqlalchemy/dialects/mssql/pyodbc.py b/lib/sqlalchemy/dialects/mssql/pyodbc.py index 91e8fd6b5a0..edb76f26525 100644 --- a/lib/sqlalchemy/dialects/mssql/pyodbc.py +++ b/lib/sqlalchemy/dialects/mssql/pyodbc.py @@ -155,6 +155,34 @@ def provide_token(dialect, conn_rec, cargs, cparams): stating that a connection string when using an access token must not contain ``UID``, ``PWD``, ``Authentication`` or ``Trusted_Connection`` parameters. +.. _azure_synapse_ignore_no_transaction_on_rollback: + +Avoiding transaction-related exceptions on Azure Synapse Analytics +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Azure Synapse Analytics has a significant difference in its transaction +handling compared to plain SQL Server; in some cases an error within a Synapse +transaction can cause it to be arbitrarily terminated on the server side, which +then causes the DBAPI ``.rollback()`` method (as well as ``.commit()``) to +fail. The issue prevents the usual DBAPI contract of allowing ``.rollback()`` +to pass silently if no transaction is present as the driver does not expect +this condition. The symptom of this failure is an exception with a message +resembling 'No corresponding transaction found. (111214)' when attempting to +emit a ``.rollback()`` after an operation had a failure of some kind. 
+ +This specific case can be handled by passing ``ignore_no_transaction_on_rollback=True`` to +the SQL Server dialect via the :func:`_sa.create_engine` function as follows:: + + engine = create_engine(connection_url, ignore_no_transaction_on_rollback=True) + +Using the above parameter, the dialect will catch ``ProgrammingError`` +exceptions raised during ``connection.rollback()`` and emit a warning +if the error message contains code ``111214``, however will not raise +an exception. + +.. versionadded:: 1.4.40 Added the + ``ignore_no_transaction_on_rollback=True`` parameter. + Enable autocommit for Azure SQL Data Warehouse (DW) connections ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/test/dialect/mssql/test_engine.py b/test/dialect/mssql/test_engine.py index b5a04f1405b..af8db861611 100644 --- a/test/dialect/mssql/test_engine.py +++ b/test/dialect/mssql/test_engine.py @@ -1,6 +1,7 @@ # -*- encoding: utf-8 from decimal import Decimal +import re from sqlalchemy import Column from sqlalchemy import event @@ -23,6 +24,7 @@ from sqlalchemy.testing import engines from sqlalchemy.testing import eq_ from sqlalchemy.testing import expect_raises +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import expect_warnings from sqlalchemy.testing import fixtures from sqlalchemy.testing import mock @@ -647,7 +649,7 @@ def test_isolation_level(self, metadata): class IsolationLevelDetectTest(fixtures.TestBase): - def _fixture(self, view): + def _fixture(self, view_result): class Error(Exception): pass @@ -660,15 +662,25 @@ class Error(Exception): def fail_on_exec( stmt, ): - if view is not None and view in stmt: + result[:] = [] + if "SELECT name FROM sys.system_views" in stmt: + if view_result: + result.append((view_result,)) + elif re.match( + ".*SELECT CASE transaction_isolation_level.*FROM sys.%s" + % (view_result,), + stmt, + re.S, + ): result.append(("SERIALIZABLE",)) else: - raise Error("that didn't work") + assert 
False connection = Mock( cursor=Mock( return_value=Mock( - execute=fail_on_exec, fetchone=lambda: result[0] + execute=fail_on_exec, + fetchone=lambda: result[0] if result else None, ) ) ) @@ -688,13 +700,12 @@ def test_exec_sessions(self): def test_not_supported(self): dialect, connection = self._fixture(None) - with expect_warnings("Could not fetch transaction isolation level"): - assert_raises_message( - NotImplementedError, - "Can't fetch isolation", - dialect.get_isolation_level, - connection, - ) + assert_raises_message( + NotImplementedError, + "Can't fetch isolation level on this particular ", + dialect.get_isolation_level, + connection, + ) class InvalidTransactionFalsePositiveTest(fixtures.TablesTest): @@ -732,3 +743,44 @@ def test_invalid_transaction_detection(self, connection): # "Can't reconnect until invalid transaction is rolled back." result = connection.execute(t.select()).fetchall() eq_(len(result), 1) + + +class IgnoreNotransOnRollbackTest(fixtures.TestBase): + def test_ignore_no_transaction_on_rollback(self): + """test #8231""" + + class ProgrammingError(Exception): + pass + + dialect = base.dialect(ignore_no_transaction_on_rollback=True) + dialect.dbapi = mock.Mock(ProgrammingError=ProgrammingError) + + connection = mock.Mock( + rollback=mock.Mock( + side_effect=ProgrammingError("Error 111214 happened") + ) + ) + with expect_warnings( + "ProgrammingError 111214 'No corresponding transaction found.' 
" + "has been suppressed via ignore_no_transaction_on_rollback=True" + ): + dialect.do_rollback(connection) + + def test_other_programming_error_on_rollback(self): + """test #8231""" + + class ProgrammingError(Exception): + pass + + dialect = base.dialect(ignore_no_transaction_on_rollback=True) + dialect.dbapi = mock.Mock(ProgrammingError=ProgrammingError) + + connection = mock.Mock( + rollback=mock.Mock( + side_effect=ProgrammingError("Some other error happened") + ) + ) + with expect_raises_message( + ProgrammingError, "Some other error happened" + ): + dialect.do_rollback(connection) From ac463218f45255f92c5ef0390c337fe60506c917 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 3 Aug 2022 12:08:54 -0400 Subject: [PATCH 328/632] send in the dragons on async_scoped_session make it clear that async_scoped_session.remove() must be called, else memory will build up. Generally discourage the whole pattern as well, as this is a "framework" pattern and we don't really want to be supporting frameworks. Also indicate that scopefunc must be idempotent and lightweight. Fixes: #8340 Change-Id: Ibc3d21124ae73c3b25ee51966504bbb1975c36b2 (cherry picked from commit c2327ec60f3f3b52a4b3a0daeef39174d96d225e) --- doc/build/orm/extensions/asyncio.rst | 35 ++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 5 deletions(-) diff --git a/doc/build/orm/extensions/asyncio.rst b/doc/build/orm/extensions/asyncio.rst index 8dec8991a88..c21d561b6bd 100644 --- a/doc/build/orm/extensions/asyncio.rst +++ b/doc/build/orm/extensions/asyncio.rst @@ -731,10 +731,21 @@ from using any connection more than once:: Using asyncio scoped session ---------------------------- -The usage of :class:`_asyncio.async_scoped_session` is mostly similar to -:class:`.scoped_session`. 
However, since there's no "thread-local" concept in -the asyncio context, the "scopefunc" parameter must be provided to the -constructor:: +The "scoped session" pattern used in threaded SQLAlchemy with the +:class:`.scoped_session` object is also available in asyncio, using +an adapted version called :class:`_asyncio.async_scoped_session`. + +.. tip:: SQLAlchemy generally does not recommend the "scoped" pattern + for new development as it relies upon mutable global state that must also be + explicitly torn down when work within the thread or task is complete. + Particularly when using asyncio, it's likely a better idea to pass the + :class:`_asyncio.AsyncSession` directly to the awaitable functions that need + it. + +When using :class:`_asyncio.async_scoped_session`, as there's no "thread-local" +concept in the asyncio context, the "scopefunc" parameter must be provided to +the constructor. The example below illustrates using the +``asyncio.current_task()`` function for this purpose:: from asyncio import current_task @@ -747,7 +758,21 @@ constructor:: some_async_session = AsyncScopedSession() -:class:`_asyncio.async_scoped_session` also includes **proxy +.. warning:: The "scopefunc" used by :class:`_asyncio.async_scoped_session` + is invoked **an arbitrary number of times** within a task, once for each + time the underlying :class:`_asyncio.AsyncSession` is accessed. The function + should therefore be **idempotent** and lightweight, and should not attempt + to create or mutate any state, such as establishing callbacks, etc. + +.. warning:: Using ``current_task()`` for the "key" in the scope requires that + the :meth:`_asyncio.async_scoped_session.remove` method is called from + within the outermost awaitable, to ensure the key is removed from the + registry when the task is complete, otherwise the task handle as well as + the :class:`_asyncio.AsyncSession` will remain in memory, essentially + creating a memory leak. 
See the following example which illustrates + the correct use of :meth:`_asyncio.async_scoped_session.remove`. + +:class:`_asyncio.async_scoped_session` includes **proxy behavior** similar to that of :class:`.scoped_session`, which means it can be treated as a :class:`_asyncio.AsyncSession` directly, keeping in mind that the usual ``await`` keywords are necessary, including for the From 98e4425f2fe99daa3045350eaab679452fd1e9e6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabian=20Prei=C3=9F?= Date: Wed, 3 Aug 2022 22:25:19 +0200 Subject: [PATCH 329/632] glossary/association relationship: role->role_name (#8331) (cherry picked from commit b5485fe41828c21a555d0d5a6abf29ceb3b3147f) --- doc/build/glossary.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/build/glossary.rst b/doc/build/glossary.rst index 28456cd16cb..9c8f01d02ef 100644 --- a/doc/build/glossary.rst +++ b/doc/build/glossary.rst @@ -1341,8 +1341,8 @@ Glossary emp2 = Employee(name="emp2") proj.project_employees.extend([ - EmployeeProject(employee=emp1, role="tech lead"), - EmployeeProject(employee=emp2, role="account executive") + EmployeeProject(employee=emp1, role_name="tech lead"), + EmployeeProject(employee=emp2, role_name="account executive") ]) .. 
seealso:: From b2f550493e3bd3721d193d33eccae9852f1b39fa Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 3 Aug 2022 18:09:39 -0400 Subject: [PATCH 330/632] adjust mysql utf test we've updated mysql on jenkins and this test seems to need a small adjustment Change-Id: I21508f667700cf8f3200f15af501a66a85f48779 (cherry picked from commit ea6fb4ff5bcffcf71cdbc587504f10f03fe921ca) --- test/dialect/mysql/test_types.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/dialect/mysql/test_types.py b/test/dialect/mysql/test_types.py index 3afe6c38538..017fad3cff6 100644 --- a/test/dialect/mysql/test_types.py +++ b/test/dialect/mysql/test_types.py @@ -513,8 +513,8 @@ def test_charset_collate_table(self, metadata, connection): ) t.create(connection) t2 = Table("foo", MetaData(), autoload_with=connection) - eq_(t2.kwargs["mysql_collate"], "utf8_bin") - eq_(t2.kwargs["mysql_default charset"], "utf8") + assert t2.kwargs["mysql_collate"] in ("utf8_bin", "utf8mb3_bin") + assert t2.kwargs["mysql_default charset"] in ("utf8", "utf8mb3") # test [ticket:2906] # in order to test the condition here, need to use From 042b844245b1c0dc3cea6fe555ab8859ea92c258 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 3 Aug 2022 20:47:50 -0400 Subject: [PATCH 331/632] more mysql 8.0.30 fixes Change-Id: I9df3506f364f4721404cf2022486bc31fd5c2ce6 (cherry picked from commit 0b57cc9564caba442febb76a224a96c2b263ba10) --- test/dialect/mysql/test_reflection.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/dialect/mysql/test_reflection.py b/test/dialect/mysql/test_reflection.py index 60d7e3a5dd3..4c763a6483b 100644 --- a/test/dialect/mysql/test_reflection.py +++ b/test/dialect/mysql/test_reflection.py @@ -370,7 +370,10 @@ def test_reflection_with_table_options(self, metadata, connection): assert reflected.comment == comment assert reflected.kwargs["mysql_comment"] == comment - assert reflected.kwargs["mysql_default charset"] == "utf8" + assert 
reflected.kwargs["mysql_default charset"] in ( + "utf8", + "utf8mb3", + ) assert reflected.kwargs["mysql_avg_row_length"] == "3" assert reflected.kwargs["mysql_connection"] == "fish" From 4a50ec453543cc9fb76bb1a57d539d31207e7c35 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 4 Aug 2022 09:26:47 -0400 Subject: [PATCH 332/632] update quoted_name doc Fixes: #8339 Change-Id: If78bc9babfdc6a4dde4e65d72858ac7a402cbb4d (cherry picked from commit dc5a1c482ee1a8faf15fd81db866e5f72c1c719f) --- lib/sqlalchemy/sql/elements.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index c9cea23dadd..268c0d6ac4d 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -5149,10 +5149,11 @@ class quoted_name(util.MemoizedSlots, util.text_type): an unconditionally quoted name:: from sqlalchemy import create_engine + from sqlalchemy import inspect from sqlalchemy.sql import quoted_name engine = create_engine("oracle+cx_oracle://some_dsn") - engine.has_table(quoted_name("some_table", True)) + print(inspect(engine).has_table(quoted_name("some_table", True))) The above logic will run the "has table" logic against the Oracle backend, passing the name exactly as ``"some_table"`` without converting to From 7914b6491b31e07d2aa0313a97a0ded27627da07 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 5 Aug 2022 17:25:05 -0400 Subject: [PATCH 333/632] deep compare CTEs before considering them conflicting Fixed issue where referencing a CTE multiple times in conjunction with a polymorphic SELECT could result in multiple "clones" of the same CTE being constructed, which would then trigger these two CTEs as duplicates. To resolve, the two CTEs are deep-compared when this occurs to ensure that they are equivalent, then are treated as equivalent. 
Fixes: #8357 Change-Id: I1f634a9cf7a6c4256912aac1a00506aecea3b0e2 (cherry picked from commit 85fa363c846f4ed287565c43c32e2cca29470e25) --- doc/build/changelog/unreleased_14/8357.rst | 10 +++++ lib/sqlalchemy/sql/compiler.py | 17 +++++++-- test/orm/inheritance/test_polymorphic_rel.py | 39 ++++++++++++++++++++ test/sql/test_cte.py | 34 +++++++++++++---- 4 files changed, 88 insertions(+), 12 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8357.rst diff --git a/doc/build/changelog/unreleased_14/8357.rst b/doc/build/changelog/unreleased_14/8357.rst new file mode 100644 index 00000000000..129368bd1c3 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8357.rst @@ -0,0 +1,10 @@ +.. change:: + :tags: bug, orm + :tickets: 8357 + + Fixed issue where referencing a CTE multiple times in conjunction with a + polymorphic SELECT could result in multiple "clones" of the same CTE being + constructed, which would then trigger these two CTEs as duplicates. To + resolve, the two CTEs are deep-compared when this occurs to ensure that + they are equivalent, then are treated as equivalent. 
+ diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 330f3c3bc86..c9b6ba670c2 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -2708,10 +2708,19 @@ def visit_cte( del self.level_name_by_cte[existing_cte_reference_cte] else: - raise exc.CompileError( - "Multiple, unrelated CTEs found with " - "the same name: %r" % cte_name - ) + # if the two CTEs are deep-copy identical, consider them + # the same, **if** they are clones, that is, they came from + # the ORM or other visit method + if ( + cte._is_clone_of is not None + or existing_cte._is_clone_of is not None + ) and cte.compare(existing_cte): + is_new_cte = False + else: + raise exc.CompileError( + "Multiple, unrelated CTEs found with " + "the same name: %r" % cte_name + ) if not asfrom and not is_new_cte: return None diff --git a/test/orm/inheritance/test_polymorphic_rel.py b/test/orm/inheritance/test_polymorphic_rel.py index aa8d9eaec68..9ccec61ee12 100644 --- a/test/orm/inheritance/test_polymorphic_rel.py +++ b/test/orm/inheritance/test_polymorphic_rel.py @@ -1,5 +1,6 @@ from sqlalchemy import desc from sqlalchemy import exc as sa_exc +from sqlalchemy import exists from sqlalchemy import func from sqlalchemy import select from sqlalchemy import testing @@ -64,6 +65,44 @@ def insert_data(cls, connection): ) e1, e2, e3, b1, m1 = cls.e1, cls.e2, cls.e3, cls.b1, cls.m1 + @testing.requires.ctes + def test_cte_clone_issue(self): + """test #8357""" + + sess = fixture_session() + + cte = select(Engineer.person_id).cte(name="test_cte") + + stmt = ( + select(Engineer) + .where(exists().where(Engineer.person_id == cte.c.person_id)) + .where(exists().where(Engineer.person_id == cte.c.person_id)) + ).order_by(Engineer.person_id) + + self.assert_compile( + stmt, + "WITH test_cte AS (SELECT engineers.person_id AS person_id " + "FROM people JOIN engineers ON people.person_id = " + "engineers.person_id) SELECT engineers.person_id, " + "people.person_id AS 
person_id_1, people.company_id, " + "people.name, people.type, engineers.status, " + "engineers.engineer_name, engineers.primary_language FROM people " + "JOIN engineers ON people.person_id = engineers.person_id WHERE " + "(EXISTS (SELECT * FROM test_cte WHERE engineers.person_id = " + "test_cte.person_id)) AND (EXISTS (SELECT * FROM test_cte " + "WHERE engineers.person_id = test_cte.person_id)) " + "ORDER BY engineers.person_id", + ) + result = sess.scalars(stmt) + eq_( + result.all(), + [ + Engineer(name="dilbert"), + Engineer(name="wally"), + Engineer(name="vlad"), + ], + ) + def test_loads_at_once(self): """ Test that all objects load from the full query, when diff --git a/test/sql/test_cte.py b/test/sql/test_cte.py index d146ae60664..fed371f6294 100644 --- a/test/sql/test_cte.py +++ b/test/sql/test_cte.py @@ -486,20 +486,38 @@ def test_recursive_union_alias_four(self): "SELECT cs1.x, cs2.x AS x_1 FROM bar AS cs1, cte AS cs2", ) - def test_conflicting_names(self): + @testing.combinations(True, False, argnames="identical") + @testing.combinations(True, False, argnames="use_clone") + def test_conflicting_names(self, identical, use_clone): """test a flat out name conflict.""" s1 = select(1) c1 = s1.cte(name="cte1", recursive=True) - s2 = select(1) - c2 = s2.cte(name="cte1", recursive=True) + if use_clone: + c2 = c1._clone() + if not identical: + c2 = c2.union(select(2)) + else: + if identical: + s2 = select(1) + else: + s2 = select(column("q")) + c2 = s2.cte(name="cte1", recursive=True) s = select(c1, c2) - assert_raises_message( - CompileError, - "Multiple, unrelated CTEs found " "with the same name: 'cte1'", - s.compile, - ) + + if use_clone and identical: + self.assert_compile( + s, + 'WITH RECURSIVE cte1("1") AS (SELECT 1) SELECT cte1.1, ' + 'cte1.1 AS "1_1" FROM cte1', + ) + else: + assert_raises_message( + CompileError, + "Multiple, unrelated CTEs found " "with the same name: 'cte1'", + s.compile, + ) def test_with_recursive_no_name_currently_buggy(self): s1 
= select(1) From c539ee35229b03d61f2a10e9f5ab613201341e19 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 8 Aug 2022 12:08:30 -0400 Subject: [PATCH 334/632] repair doc warnings Change-Id: I446105028539a34da90d6b8ae4812965cc398ee5 --- doc/build/changelog/changelog_14.rst | 8 ++++---- doc/build/changelog/unreleased_14/7249.rst | 4 ++-- doc/build/core/engines.rst | 4 ++-- lib/sqlalchemy/dialects/mssql/base.py | 2 +- lib/sqlalchemy/dialects/mysql/base.py | 2 +- lib/sqlalchemy/dialects/postgresql/base.py | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index cf221b725d8..41ec3f7970d 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -155,7 +155,7 @@ This document details individual issue-level changes made throughout with a :class:`.Numeric` datatype would produce errors when attempting to reconcile the "autoincrement" column, preventing construction of the :class:`.Column` from using the :paramref:`.Column.autoincrement` parameter - as well as emitting errors when attempting to invoke an :class:`.Insert` + as well as emitting errors when attempting to invoke an :class:`_dml.Insert` construct. @@ -195,7 +195,7 @@ This document details individual issue-level changes made throughout :tickets: 8073 An informative error is raised for the use case where - :meth:`.Insert.from_select` is being passed a "compound select" object such + :meth:`_dml.Insert.from_select` is being passed a "compound select" object such as a UNION, yet the INSERT statement needs to append additional columns to support Python-side or explicit SQL defaults from the table metadata. In this case a subquery of the compound object should be passed. 
@@ -233,8 +233,8 @@ This document details individual issue-level changes made throughout Fixed an issue where using :func:`.bindparam` with no explicit data or type given could be coerced into the incorrect type when used in expressions - such as when using :meth:`.ARRAY.Comparator.any` and - :meth:`.ARRAY.Comparator.all`. + such as when using :meth:`_types.ARRAY.Comparator.any` and + :meth:`_types.ARRAY.Comparator.all`. .. change:: diff --git a/doc/build/changelog/unreleased_14/7249.rst b/doc/build/changelog/unreleased_14/7249.rst index 5d0cb658187..5244a65a207 100644 --- a/doc/build/changelog/unreleased_14/7249.rst +++ b/doc/build/changelog/unreleased_14/7249.rst @@ -3,5 +3,5 @@ :tickets: 7249 Fixed issue where :class:`.TypeDecorator` would not correctly proxy the - ``__getitem__()`` operator when decorating the :class:`.ARRAY` datatype, - without explicit workarounds. + ``__getitem__()`` operator when decorating the :class:`_types.ARRAY` + datatype, without explicit workarounds. diff --git a/doc/build/core/engines.rst b/doc/build/core/engines.rst index 7aa49f29d37..be14536919a 100644 --- a/doc/build/core/engines.rst +++ b/doc/build/core/engines.rst @@ -62,7 +62,7 @@ hostname, database name as well as optional keyword arguments for additional con In some cases a file path is accepted, and in others a "data source name" replaces the "host" and "database" portions. The typical form of a database URL is: -.. sourcecode:: plain +.. sourcecode:: none dialect+driver://username:password@host:port/database @@ -84,7 +84,7 @@ Below is an example of a URL that includes the password ``"kx@jj5/g"``, where th "at" sign and slash characters are represented as ``%40`` and ``%2F``, respectively: -.. sourcecode:: plain +.. 
sourcecode:: none postgresql+pg8000://dbuser:kx%40jj5%2Fg@pghost10/appdb diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index 2d1d0f7008e..ee6ce87696d 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -434,7 +434,7 @@ class TestTable(Base): * ``SNAPSHOT`` - specific to SQL Server There are also more options for isolation level configurations, such as -"sub-engine" objects linked to a main :class:`.Engine` which each apply +"sub-engine" objects linked to a main :class:`_engine.Engine` which each apply different isolation level settings. See the discussion at :ref:`dbapi_autocommit` for background. diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index 95e9cd1b37d..111c63bff16 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -239,7 +239,7 @@ ``SELECT @@autocommit;``. There are also more options for isolation level configurations, such as -"sub-engine" objects linked to a main :class:`.Engine` which each apply +"sub-engine" objects linked to a main :class:`_engine.Engine` which each apply different isolation level settings. See the discussion at :ref:`dbapi_autocommit` for background. diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index 23d4a49feeb..eb841700d3b 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -179,7 +179,7 @@ def use_identity(element, compiler, **kw): # ... work with transaction There are also more options for isolation level configurations, such as -"sub-engine" objects linked to a main :class:`.Engine` which each apply +"sub-engine" objects linked to a main :class:`_engine.Engine` which each apply different isolation level settings. See the discussion at :ref:`dbapi_autocommit` for background. 
From f297ed07379e13cb4f130d83c0fcbf2410d663ff Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 8 Aug 2022 12:10:25 -0400 Subject: [PATCH 335/632] - 1.4.40 --- doc/build/changelog/changelog_14.rst | 173 ++++++++++++++++++++- doc/build/changelog/unreleased_14/4392.rst | 22 --- doc/build/changelog/unreleased_14/7249.rst | 7 - doc/build/changelog/unreleased_14/8145.rst | 14 -- doc/build/changelog/unreleased_14/8190.rst | 8 - doc/build/changelog/unreleased_14/8196.rst | 7 - doc/build/changelog/unreleased_14/8210.rst | 8 - doc/build/changelog/unreleased_14/8231.rst | 20 --- doc/build/changelog/unreleased_14/8235.rst | 16 -- doc/build/changelog/unreleased_14/8253.rst | 10 -- doc/build/changelog/unreleased_14/8357.rst | 10 -- doc/build/changelog/unreleased_14/yp.rst | 38 ----- doc/build/conf.py | 4 +- 13 files changed, 174 insertions(+), 163 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/4392.rst delete mode 100644 doc/build/changelog/unreleased_14/7249.rst delete mode 100644 doc/build/changelog/unreleased_14/8145.rst delete mode 100644 doc/build/changelog/unreleased_14/8190.rst delete mode 100644 doc/build/changelog/unreleased_14/8196.rst delete mode 100644 doc/build/changelog/unreleased_14/8210.rst delete mode 100644 doc/build/changelog/unreleased_14/8231.rst delete mode 100644 doc/build/changelog/unreleased_14/8235.rst delete mode 100644 doc/build/changelog/unreleased_14/8253.rst delete mode 100644 doc/build/changelog/unreleased_14/8357.rst delete mode 100644 doc/build/changelog/unreleased_14/yp.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 41ec3f7970d..217f4669030 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,178 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.40 - :include_notes_from: unreleased_14 + :released: August 8, 2022 + + .. 
change:: + :tags: bug, orm + :tickets: 8357 + + Fixed issue where referencing a CTE multiple times in conjunction with a + polymorphic SELECT could result in multiple "clones" of the same CTE being + constructed, which would then trigger these two CTEs as duplicates. To + resolve, the two CTEs are deep-compared when this occurs to ensure that + they are equivalent, then are treated as equivalent. + + + .. change:: + :tags: bug, orm, declarative + :tickets: 8190 + + Fixed issue where a hierarchy of classes set up as an abstract or mixin + declarative classes could not declare standalone columns on a superclass + that would then be copied correctly to a :class:`_orm.declared_attr` + callable that wanted to make use of them on a descendant class. + + .. change:: + :tags: bug, types + :tickets: 7249 + + Fixed issue where :class:`.TypeDecorator` would not correctly proxy the + ``__getitem__()`` operator when decorating the :class:`_types.ARRAY` + datatype, without explicit workarounds. + + .. change:: + :tags: bug, asyncio + :tickets: 8145 + + Added ``asyncio.shield()`` to the connection and session release process + specifically within the ``__aexit__()`` context manager exit, when using + :class:`.AsyncConnection` or :class:`.AsyncSession` as a context manager + that releases the object when the context manager is complete. This appears + to help with task cancellation when using alternate concurrency libraries + such as ``anyio``, ``uvloop`` that otherwise don't provide an async context + for the connection pool to release the connection properly during task + cancellation. + + + + .. change:: + :tags: bug, postgresql + :tickets: 4392 + + Fixed issue in psycopg2 dialect where the "multiple hosts" feature + implemented for :ticket:`4392`, where multiple ``host:port`` pairs could be + passed in the query string as + ``?host=host1:port1&host=host2:port2&host=host3:port3`` was not implemented + correctly, as it did not propagate the "port" parameter appropriately. 
+ Connections that didn't use a different "port" likely worked without issue, + and connections that had "port" for some of the entries may have + incorrectly passed on that hostname. The format is now corrected to pass + hosts/ports appropriately. + + As part of this change, maintained support for another multihost style that + worked unintentionally, which is comma-separated + ``?host=h1,h2,h3&port=p1,p2,p3``. This format is more consistent with + libpq's query-string format, whereas the previous format is inspired by a + different aspect of libpq's URI format but is not quite the same thing. + + If the two styles are mixed together, an error is raised as this is + ambiguous. + + .. change:: + :tags: bug, sql + :tickets: 8253 + + Adjusted the SQL compilation for string containment functions + ``.contains()``, ``.startswith()``, ``.endswith()`` to force the use of the + string concatenation operator, rather than relying upon the overload of the + addition operator, so that non-standard use of these operators with for + example bytestrings still produces string concatenation operators. + + + .. change:: + :tags: bug, orm + :tickets: 8235 + + A :func:`_sql.select` construct that is passed a sole '*' argument for + ``SELECT *``, either via string, :func:`_sql.text`, or + :func:`_sql.literal_column`, will be interpreted as a Core-level SQL + statement rather than as an ORM level statement. This is so that the ``*``, + when expanded to match any number of columns, will result in all columns + returned in the result. the ORM- level interpretation of + :func:`_sql.select` needs to know the names and types of all ORM columns up + front which can't be achieved when ``'*'`` is used. + + If ``'*`` is used amongst other expressions simultaneously with an ORM + statement, an error is raised as this can't be interpreted correctly by the + ORM. + + .. 
change:: + :tags: bug, mssql + :tickets: 8210 + + Fixed issues that prevented the new usage patterns for using DML with ORM + objects presented at :ref:`orm_dml_returning_objects` from working + correctly with the SQL Server pyodbc dialect. + + + .. change:: + :tags: bug, mssql + :tickets: 8231 + + Fixed issue where the SQL Server dialect's query for the current isolation + level would fail on Azure Synapse Analytics, due to the way in which this + database handles transaction rollbacks after an error has occurred. The + initial query has been modified to no longer rely upon catching an error + when attempting to detect the appropriate system view. Additionally, to + better support this database's very specific "rollback" behavior, + implemented new parameter ``ignore_no_transaction_on_rollback`` indicating + that a rollback should ignore Azure Synapse error 'No corresponding + transaction found. (111214)', which is raised if no transaction is present + in conflict with the Python DBAPI. + + Initial patch and valuable debugging assistance courtesy of @ww2406. + + .. seealso:: + + :ref:`azure_synapse_ignore_no_transaction_on_rollback` + + .. change:: + :tags: bug, mypy + :tickets: 8196 + + Fixed a crash of the mypy plugin when using a lambda as a Column + default. Pull request curtesy of tchapi. + + + .. change:: + :tags: usecase, engine + + Implemented new :paramref:`_engine.Connection.execution_options.yield_per` + execution option for :class:`_engine.Connection` in Core, to mirror that of + the same :ref:`yield_per ` option available in + the ORM. The option sets both the + :paramref:`_engine.Connection.execution_options.stream_results` option at + the same time as invoking :meth:`_engine.Result.yield_per`, to provide the + most common streaming result configuration which also mirrors that of the + ORM use case in its usage pattern. + + .. seealso:: + + :ref:`engine_stream_results` - revised documentation + + + .. 
change:: + :tags: bug, engine + + Fixed bug in :class:`_engine.Result` where the usage of a buffered result + strategy would not be used if the dialect in use did not support an + explicit "server side cursor" setting, when using + :paramref:`_engine.Connection.execution_options.stream_results`. This is in + error as DBAPIs such as that of SQLite and Oracle already use a + non-buffered result fetching scheme, which still benefits from usage of + partial result fetching. The "buffered" strategy is now used in all + cases where :paramref:`_engine.Connection.execution_options.stream_results` + is set. + + + .. change:: + :tags: bug, engine + :tickets: 8199 + + Added :meth:`.FilterResult.yield_per` so that result implementations + such as :class:`.MappingResult`, :class:`.ScalarResult` and + :class:`.AsyncResult` have access to this method. .. changelog:: :version: 1.4.39 diff --git a/doc/build/changelog/unreleased_14/4392.rst b/doc/build/changelog/unreleased_14/4392.rst deleted file mode 100644 index 9b83b09cc58..00000000000 --- a/doc/build/changelog/unreleased_14/4392.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. change:: - :tags: bug, postgresql - :tickets: 4392 - - Fixed issue in psycopg2 dialect where the "multiple hosts" feature - implemented for :ticket:`4392`, where multiple ``host:port`` pairs could be - passed in the query string as - ``?host=host1:port1&host=host2:port2&host=host3:port3`` was not implemented - correctly, as it did not propagate the "port" parameter appropriately. - Connections that didn't use a different "port" likely worked without issue, - and connections that had "port" for some of the entries may have - incorrectly passed on that hostname. The format is now corrected to pass - hosts/ports appropriately. - - As part of this change, maintained support for another multihost style that - worked unintentionally, which is comma-separated - ``?host=h1,h2,h3&port=p1,p2,p3``. 
This format is more consistent with - libpq's query-string format, whereas the previous format is inspired by a - different aspect of libpq's URI format but is not quite the same thing. - - If the two styles are mixed together, an error is raised as this is - ambiguous. \ No newline at end of file diff --git a/doc/build/changelog/unreleased_14/7249.rst b/doc/build/changelog/unreleased_14/7249.rst deleted file mode 100644 index 5244a65a207..00000000000 --- a/doc/build/changelog/unreleased_14/7249.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: bug, types - :tickets: 7249 - - Fixed issue where :class:`.TypeDecorator` would not correctly proxy the - ``__getitem__()`` operator when decorating the :class:`_types.ARRAY` - datatype, without explicit workarounds. diff --git a/doc/build/changelog/unreleased_14/8145.rst b/doc/build/changelog/unreleased_14/8145.rst deleted file mode 100644 index 4cd6c12a588..00000000000 --- a/doc/build/changelog/unreleased_14/8145.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. change:: - :tags: bug, asyncio - :tickets: 8145 - - Added ``asyncio.shield()`` to the connection and session release process - specifically within the ``__aexit__()`` context manager exit, when using - :class:`.AsyncConnection` or :class:`.AsyncSession` as a context manager - that releases the object when the context manager is complete. This appears - to help with task cancellation when using alternate concurrency libraries - such as ``anyio``, ``uvloop`` that otherwise don't provide an async context - for the connection pool to release the connection properly during task - cancellation. - - diff --git a/doc/build/changelog/unreleased_14/8190.rst b/doc/build/changelog/unreleased_14/8190.rst deleted file mode 100644 index 934e44cf519..00000000000 --- a/doc/build/changelog/unreleased_14/8190.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. 
change:: - :tags: bug, orm, declarative - :tickets: 8190 - - Fixed issue where a hierarchy of classes set up as an abstract or mixin - declarative classes could not declare standalone columns on a superclass - that would then be copied correctly to a :class:`_orm.declared_attr` - callable that wanted to make use of them on a descendant class. diff --git a/doc/build/changelog/unreleased_14/8196.rst b/doc/build/changelog/unreleased_14/8196.rst deleted file mode 100644 index d5afbb8f7a9..00000000000 --- a/doc/build/changelog/unreleased_14/8196.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: bug, mypy - :tickets: 8196 - - Fixed a crash of the mypy plugin when using a lambda as a Column - default. Pull request curtesy of tchapi. - diff --git a/doc/build/changelog/unreleased_14/8210.rst b/doc/build/changelog/unreleased_14/8210.rst deleted file mode 100644 index f99d86194f5..00000000000 --- a/doc/build/changelog/unreleased_14/8210.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, mssql - :tickets: 8210 - - Fixed issues that prevented the new usage patterns for using DML with ORM - objects presented at :ref:`orm_dml_returning_objects` from working - correctly with the SQL Server pyodbc dialect. - diff --git a/doc/build/changelog/unreleased_14/8231.rst b/doc/build/changelog/unreleased_14/8231.rst deleted file mode 100644 index 401ab717e5d..00000000000 --- a/doc/build/changelog/unreleased_14/8231.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. change:: - :tags: bug, mssql - :tickets: 8231 - - Fixed issue where the SQL Server dialect's query for the current isolation - level would fail on Azure Synapse Analytics, due to the way in which this - database handles transaction rollbacks after an error has occurred. The - initial query has been modified to no longer rely upon catching an error - when attempting to detect the appropriate system view. 
Additionally, to - better support this database's very specific "rollback" behavior, - implemented new parameter ``ignore_no_transaction_on_rollback`` indicating - that a rollback should ignore Azure Synapse error 'No corresponding - transaction found. (111214)', which is raised if no transaction is present - in conflict with the Python DBAPI. - - Initial patch and valuable debugging assistance courtesy of @ww2406. - - .. seealso:: - - :ref:`azure_synapse_ignore_no_transaction_on_rollback` diff --git a/doc/build/changelog/unreleased_14/8235.rst b/doc/build/changelog/unreleased_14/8235.rst deleted file mode 100644 index ea5726e10d9..00000000000 --- a/doc/build/changelog/unreleased_14/8235.rst +++ /dev/null @@ -1,16 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 8235 - - A :func:`_sql.select` construct that is passed a sole '*' argument for - ``SELECT *``, either via string, :func:`_sql.text`, or - :func:`_sql.literal_column`, will be interpreted as a Core-level SQL - statement rather than as an ORM level statement. This is so that the ``*``, - when expanded to match any number of columns, will result in all columns - returned in the result. the ORM- level interpretation of - :func:`_sql.select` needs to know the names and types of all ORM columns up - front which can't be achieved when ``'*'`` is used. - - If ``'*`` is used amongst other expressions simultaneously with an ORM - statement, an error is raised as this can't be interpreted correctly by the - ORM. diff --git a/doc/build/changelog/unreleased_14/8253.rst b/doc/build/changelog/unreleased_14/8253.rst deleted file mode 100644 index 7496ae9fb0c..00000000000 --- a/doc/build/changelog/unreleased_14/8253.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. 
change:: - :tags: bug, sql - :tickets: 8253 - - Adjusted the SQL compilation for string containment functions - ``.contains()``, ``.startswith()``, ``.endswith()`` to force the use of the - string concatenation operator, rather than relying upon the overload of the - addition operator, so that non-standard use of these operators with for - example bytestrings still produces string concatenation operators. - diff --git a/doc/build/changelog/unreleased_14/8357.rst b/doc/build/changelog/unreleased_14/8357.rst deleted file mode 100644 index 129368bd1c3..00000000000 --- a/doc/build/changelog/unreleased_14/8357.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 8357 - - Fixed issue where referencing a CTE multiple times in conjunction with a - polymorphic SELECT could result in multiple "clones" of the same CTE being - constructed, which would then trigger these two CTEs as duplicates. To - resolve, the two CTEs are deep-compared when this occurs to ensure that - they are equivalent, then are treated as equivalent. - diff --git a/doc/build/changelog/unreleased_14/yp.rst b/doc/build/changelog/unreleased_14/yp.rst deleted file mode 100644 index 74e2c6a468f..00000000000 --- a/doc/build/changelog/unreleased_14/yp.rst +++ /dev/null @@ -1,38 +0,0 @@ -.. change:: - :tags: usecase, engine - - Implemented new :paramref:`_engine.Connection.execution_options.yield_per` - execution option for :class:`_engine.Connection` in Core, to mirror that of - the same :ref:`yield_per ` option available in - the ORM. The option sets both the - :paramref:`_engine.Connection.execution_options.stream_results` option at - the same time as invoking :meth:`_engine.Result.yield_per`, to provide the - most common streaming result configuration which also mirrors that of the - ORM use case in its usage pattern. - - .. seealso:: - - :ref:`engine_stream_results` - revised documentation - - -.. 
change:: - :tags: bug, engine - - Fixed bug in :class:`_engine.Result` where the usage of a buffered result - strategy would not be used if the dialect in use did not support an - explicit "server side cursor" setting, when using - :paramref:`_engine.Connection.execution_options.stream_results`. This is in - error as DBAPIs such as that of SQLite and Oracle already use a - non-buffered result fetching scheme, which still benefits from usage of - partial result fetching. The "buffered" strategy is now used in all - cases where :paramref:`_engine.Connection.execution_options.stream_results` - is set. - - -.. change:: - :tags: bug, engine - :tickets: 8199 - - Added :meth:`.FilterResult.yield_per` so that result implementations - such as :class:`.MappingResult`, :class:`.ScalarResult` and - :class:`.AsyncResult` have access to this method. diff --git a/doc/build/conf.py b/doc/build/conf.py index 10dc263ac22..33ee77319ed 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -213,9 +213,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. -release = "1.4.39" +release = "1.4.40" -release_date = "June 24, 2022" +release_date = "August 8, 2022" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From 1918a691e5e1ea6e068e8b785e70f39a72905f03 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 8 Aug 2022 12:16:26 -0400 Subject: [PATCH 336/632] Version 1.4.41 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 217f4669030..68cf42463d2 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. 
changelog:: + :version: 1.4.41 + :include_notes_from: unreleased_14 + .. changelog:: :version: 1.4.40 :released: August 8, 2022 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index 3cae9f5544c..665c0491197 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.40" +__version__ = "1.4.41" def __go(lcls): From 172c8dd0b826e6128b29261caec44ca50eec93ee Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 8 Aug 2022 14:50:29 -0400 Subject: [PATCH 337/632] improve typing intro Fixes: #8362 Change-Id: I38aa1727e94c50a9f06bd75d57ea1ca1cfffd2f3 (cherry picked from commit ac1d775d862e62561effcce54462f687dd79b8bb) --- doc/build/core/metadata.rst | 6 +- doc/build/core/type_basics.rst | 237 ++++++++++++++++++++++----------- 2 files changed, 166 insertions(+), 77 deletions(-) diff --git a/doc/build/core/metadata.rst b/doc/build/core/metadata.rst index 701146195a7..03721c2b6c3 100644 --- a/doc/build/core/metadata.rst +++ b/doc/build/core/metadata.rst @@ -34,7 +34,11 @@ primary arguments are the table name, then the The remaining positional arguments are mostly :class:`~sqlalchemy.schema.Column` objects describing each column:: - user = Table('user', metadata_obj, + from sqlalchemy import Table, Column, Integer, String + + user = Table( + 'user', + metadata_obj, Column('user_id', Integer, primary_key=True), Column('user_name', String(16), nullable=False), Column('email_address', String(60)), diff --git a/doc/build/core/type_basics.rst b/doc/build/core/type_basics.rst index 069214f99b9..52debc32f9a 100644 --- a/doc/build/core/type_basics.rst +++ b/doc/build/core/type_basics.rst @@ -4,31 +4,172 @@ Column and Data Types .. module:: sqlalchemy.types SQLAlchemy provides abstractions for most common database data types, -and a mechanism for specifying your own custom data types. +as well as several techniques for customization of datatypes. 
+ +Database types are represented using Python classes, all of which ultimately +extend from the base type class known as :class:`_types.TypeEngine`. There are +two general categories of datatypes, each of which express themselves within +the typing hierarchy in different ways. The category used by an individual +datatype class can be identified based on the use of two different naming +conventions, which are "CamelCase" and "UPPERCASE". + +.. seealso:: + + :ref:`tutorial_core_metadata` - in the :ref:`unified_tutorial`. Illustrates + the most rudimental use of :class:`_types.TypeEngine` type objects to + define :class:`_schema.Table` metadata and introduces the concept + of type objects in tutorial form. + +The "CamelCase" datatypes +------------------------- + +The rudimental types have "CamelCase" names such as :class:`_types.String`, +:class:`_types.Numeric`, :class:`_types.Integer`, and :class:`_types.DateTime`. +All of the immediate subclasses of :class:`_types.TypeEngine` are +"CamelCase" types. The "CamelCase" types are to the greatest degree possible +**database agnostic**, meaning they can all be used on any database backend +where they will behave in such a way as appropriate to that backend in order to +produce the desired behavior. + +An example of a straightforward "CamelCase" datatype is :class:`_types.String`. 
+On most backends, using this datatype in a +:ref:`table specification ` will correspond to the +``VARCHAR`` database type being used on the target backend, delivering string +values to and from the database, as in the example below:: + + from sqlalchemy import MetaData + from sqlalchemy import Table, Column, Integer, String + + metadata_obj = MetaData() + + user = Table( + 'user', + metadata_obj, + Column('user_name', String, primary_key=True), + Column('email_address', String(60)), + ) + +When using a particular :class:`_types.TypeEngine` class in a +:class:`_schema.Table` definition or in any SQL expression overall, if no +arguments are required it may be passed as the class itself, that is, without +instantiating it with ``()``. If arguments are needed, such as the length +argument of 60 in the ``"email_address"`` column above, the type may be +instantiated. + +Another "CamelCase" datatype that expresses more backend-specific behavior +is the :class:`_types.Boolean` datatype. Unlike :class:`_types.String`, +which represents a string datatype that all databases have, +not every backend has a real "boolean" datatype; some make use of integers +or BIT values 0 and 1, some have boolean literal constants ``true`` and +``false`` while others dont. For this datatype, :class:`_types.Boolean` +may render ``BOOLEAN`` on a backend such as PostgreSQL, ``BIT`` on the +MySQL backend and ``SMALLINT`` on Oracle. As data is sent and received +from the database using this type, based on the dialect in use it may be +interpreting Python numeric or boolean values. + +The typical SQLAlchemy application will likely wish to use primarily +"CamelCase" types in the general case, as they will generally provide the best +basic behavior and be automatically portable to all backends. + +Reference for the general set of "CamelCase" datatypes is below at +:ref:`types_generic`. 
+ +The "UPPERCASE" datatypes +------------------------- + +In contrast to the "CamelCase" types are the "UPPERCASE" datatypes. These +datatypes are always inherited from a particular "CamelCase" datatype, and +always represent an **exact** datatype. When using an "UPPERCASE" datatype, +the name of the type is always rendered exactly as given, without regard for +whether or not the current backend supports it. Therefore the use +of "UPPERCASE" types in a SQLAlchemy application indicates that specific +datatypes are required, which then implies that the application would normally, +without additional steps taken, +be limited to those backends which use the type exactly as given. Examples +of UPPERCASE types include :class:`_types.VARCHAR`, :class:`_types.NUMERIC`, +:class:`_types.INTEGER`, and :class:`_types.TIMESTAMP`, which inherit directly +from the previously mentioned "CamelCase" types +:class:`_types.String`, +:class:`_types.Numeric`, :class:`_types.Integer`, and :class:`_types.DateTime`, +respectively. + +The "UPPERCASE" datatypes that are part of ``sqlalchemy.types`` are common +SQL types that typically expect to be available on at least two backends +if not more. + +Reference for the general set of "UPPERCASE" datatypes is below at +:ref:`types_sqlstandard`. + + + +.. _types_vendor: + +Backend-specific "UPPERCASE" datatypes +-------------------------------------- -The methods and attributes of type objects are rarely used directly. -Type objects are supplied to :class:`~sqlalchemy.schema.Table` definitions -and can be supplied as type hints to `functions` for occasions where -the database driver returns an incorrect type. +Most databases also have their own datatypes that +are either fully specific to those databases, or add additional arguments +that are specific to those databases. For these datatypes, specific +SQLAlchemy dialects provide **backend-specific** "UPPERCASE" datatypes, for a +SQL type that has no analogue on other backends. 
Examples of backend-specific +uppercase datatypes include PostgreSQL's :class:`_postgresql.JSONB`, SQL Server's +:class:`_mssql.IMAGE` and MySQL's :class:`_mysql.TINYTEXT`. + +Specific backends may also include "UPPERCASE" datatypes that extend the +arguments available from that same "UPPERCASE" datatype as found in the +``sqlalchemy.types`` module. An example is when creating a MySQL string +datatype, one might want to specify MySQL-specific arguments such as ``charset`` +or ``national``, which are available from the MySQL version +of :class:`_mysql.VARCHAR` as the MySQL-only parameters +:paramref:`_mysql.VARCHAR.charset` and :paramref:`_mysql.VARCHAR.national`. + +API documentation for backend-specific types are in the dialect-specific +documentation, listed at :ref:`dialect_toplevel`. + + +Using "UPPERCASE" and Backend-specific types for multiple backends +------------------------------------------------------------------ + +Reviewing the presence of "UPPERCASE" and "CamelCase" types leads to the natural +use case of how to make use of "UPPERCASE" datatypes for backend-specific +options, but only when that backend is in use. To tie together the +database-agnostic "CamelCase" and backend-specific "UPPERCASE" systems, one +makes use of the :meth:`_types.TypeEngine.with_variant` method in order to +**compose** types together to work with specific behaviors on specific backends. 
+ +Such as, to use the :class:`_types.String` datatype, but when running on MySQL +to make use of the :paramref:`_mysql.VARCHAR.charset` parameter of +:class:`_mysql.VARCHAR` when the table is created on MySQL or MariaDB, +:meth:`_types.TypeEngine.with_variant` may be used as below:: + + from sqlalchemy import MetaData + from sqlalchemy import Table, Column, Integer, String + from sqlalchemy.dialects.mysql import VARCHAR + + metadata_obj = MetaData() + + user = Table( + "user", + metadata_obj, + Column("user_name", String, primary_key=True), + Column( + "bio", + String(255).with_variant(VARCHAR(255, charset="utf-8"), "mysql", "mariadb"), + ), + ) -.. code-block:: pycon +In the above table definition, the ``"bio"`` column will have string-behaviors +on all backends. On most backends it will render in DDL as ``VARCHAR``. +However on MySQL and MariaDB, it will render as ``VARCHAR(255) CHARACTER SET utf-8``. - >>> users = Table('users', metadata, - ... Column('id', Integer, primary_key=True), - ... Column('login', String(32)) - ... ) +.. seealso:: -SQLAlchemy will use the ``Integer`` and ``String(32)`` type -information when issuing a ``CREATE TABLE`` statement and will use it -again when reading back rows ``SELECTed`` from the database. -Functions that accept a type (such as :func:`~sqlalchemy.schema.Column`) will -typically accept a type class or instance; ``Integer`` is equivalent -to ``Integer()`` with no construction arguments in this case. + :meth:`_types.TypeEngine.with_variant` - additional usage examples and notes .. _types_generic: -Generic Types -------------- +Generic "CamelCase" Types +------------------------- Generic types specify a column that can read, write and store a particular type of Python data. SQLAlchemy will choose the best @@ -97,8 +238,8 @@ type is emitted in ``CREATE TABLE``, such as ``VARCHAR`` see .. 
_types_sqlstandard: -SQL Standard and Multiple Vendor Types --------------------------------------- +SQL Standard and Multiple Vendor "UPPERCASE" Types +-------------------------------------------------- This category of types refers to types that are either part of the SQL standard, or are potentially found within a subset of database backends. @@ -181,59 +322,3 @@ its exact name in DDL with ``CREATE TABLE`` is issued. .. autoclass:: VARCHAR -.. _types_vendor: - -Vendor-Specific Types ---------------------- - -Database-specific types are also available for import from each -database's dialect module. See the :ref:`dialect_toplevel` -reference for the database you're interested in. - -For example, MySQL has a ``BIGINT`` type and PostgreSQL has an -``INET`` type. To use these, import them from the module explicitly:: - - from sqlalchemy.dialects import mysql - - table = Table('foo', metadata, - Column('id', mysql.BIGINT), - Column('enumerates', mysql.ENUM('a', 'b', 'c')) - ) - -Or some PostgreSQL types:: - - from sqlalchemy.dialects import postgresql - - table = Table('foo', metadata, - Column('ipaddress', postgresql.INET), - Column('elements', postgresql.ARRAY(String)) - ) - -Each dialect provides the full set of database types supported by -that backend within its own module, so they may all be used -against the module directly without the need to differentiate between -which types are specific to that backend or not:: - - from sqlalchemy.dialects import postgresql - - t = Table('mytable', metadata, - Column('id', postgresql.INTEGER, primary_key=True), - Column('name', postgresql.VARCHAR(300)), - Column('inetaddr', postgresql.INET) - ) - -Where above, the INTEGER and VARCHAR types are ultimately from -sqlalchemy.types, and INET is specific to the PostgreSQL dialect. - -Some dialect level types have the same name as the SQL standard type, -but also provide additional arguments. 
For example, MySQL implements -the full range of character and string types including additional arguments -such as `collation` and `charset`:: - - from sqlalchemy.dialects.mysql import VARCHAR, TEXT - - table = Table('foo', metadata_obj, - Column('col1', VARCHAR(200, collation='binary')), - Column('col2', TEXT(charset='latin1')) - ) - From 0585c66a20b8075252bb4bbdeef0448f224de96a Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 9 Aug 2022 09:50:57 -0400 Subject: [PATCH 338/632] improve names for datatypes section Continuing along #8362, if we look at the document here without "this is the same text I've read for 14 years", we begin to see that the title "Column and Data types" makes no sense at all, is there a "column type" and a "Data type"? I guess what I was thinking at that time is that a type can be placed on a Column, or it can be used whenever you have, you know, "data". The phrase "SQL expression" wasn't discovered yet. "SQL Datatype" is not spectacular but at least it's one term. the new intro then is focused on the hierarchy layout so let's name it that. not amazing, but better than the duplicate name that made no sense before. Fixes: #8362 Change-Id: Iab37ef5605ec55f30284ac9a98bf7246f736675d (cherry picked from commit 7e442cd0a9341ac828b4c4820818ad80ad9200fa) --- doc/build/core/type_basics.rst | 2 +- doc/build/core/types.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/build/core/type_basics.rst b/doc/build/core/type_basics.rst index 52debc32f9a..eb6c8791258 100644 --- a/doc/build/core/type_basics.rst +++ b/doc/build/core/type_basics.rst @@ -1,4 +1,4 @@ -Column and Data Types +The Type Hierarchy ===================== .. module:: sqlalchemy.types diff --git a/doc/build/core/types.rst b/doc/build/core/types.rst index 762105646cb..d569bdee77e 100644 --- a/doc/build/core/types.rst +++ b/doc/build/core/types.rst @@ -1,6 +1,6 @@ .. _types_toplevel: -Column and Data Types +SQL Datatype Objects ===================== .. 
toctree:: From b5f25eb99b65925139b099c4e37717969542d108 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 17 Aug 2022 09:17:33 -0400 Subject: [PATCH 339/632] remove erroneous note about future metadata this change was already applied Change-Id: I24cfdc1912b77f98ae6d0f3865cabd223553fc79 (cherry picked from commit 0eea54e84df104ffe8bc246eb82e4a7a2cf05079) --- lib/sqlalchemy/sql/events.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/sqlalchemy/sql/events.py b/lib/sqlalchemy/sql/events.py index c42578986de..872d17cac43 100644 --- a/lib/sqlalchemy/sql/events.py +++ b/lib/sqlalchemy/sql/events.py @@ -268,9 +268,6 @@ def receive_column_reflect(inspector, table, column_info): ] ) - A future release will allow it to be associated with a specific - :class:`_schema.MetaData` object as well. - The dictionary of column information as returned by the dialect is passed, and can be modified. The dictionary is that returned in each element of the list returned From 0d66f491d0f062bda95e2997d4f70841ac2228d8 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 16 Aug 2022 14:25:12 -0400 Subject: [PATCH 340/632] refine transfer of cached ORM options for selectin, lazy Fixed issue involving :func:`_orm.with_loader_criteria` where a closure variable used as bound parameter value within the lambda would not carry forward correctly into additional relationship loaders such as :func:`_orm.selectinload` and :func:`_orm.lazyload` after the statement were cached, using the stale originally-cached value instead. This change brings forth a good refinement where we finally realize we shouldn't be testing every ORM option with lots of switches, we just let the option itself be given "here is your uncached version, you are cached, tell us what to do!". the current decision is that strategy loader options used the cached in all cases as they always have, with_loader_criteria uses the uncached, because the uncached will have been invoked with new closure state that we definitely need. 
The only edge that might not work is if with_loader_criteria referenced an entity that is local to the query, namely a specific AliasedInsp, however that's not a documented case for this. if we had to do that, then we perhaps would introduce a more complex reconcilation logic, and this would also give us the hook to do that. For this approach to work in 1.4, state.load_options has to be ordered, so, this does the switch of load_options from set->tuple, which has been in 2.0 for a long time. if this change is not feasbile, due to affecting other areas, we may have to scale back this fix a bit, but for here, it's just two collections without any deep impacts. Fixes: #8399 Change-Id: Ided8e2123915131e3f11cf6b06d773039e73797a (cherry picked from commit 860d582028f6bbbb39cbf17698f7d6b7a8e458ea) --- doc/build/changelog/unreleased_14/8399.rst | 10 ++ lib/sqlalchemy/orm/context.py | 42 +++-- lib/sqlalchemy/orm/interfaces.py | 40 +++++ lib/sqlalchemy/orm/state.py | 2 +- lib/sqlalchemy/orm/strategies.py | 58 +++---- lib/sqlalchemy/orm/strategy_options.py | 3 + test/orm/inheritance/test_poly_loading.py | 25 ++- test/orm/test_events.py | 14 +- test/orm/test_merge.py | 8 +- test/orm/test_options.py | 23 +-- test/orm/test_relationship_criteria.py | 176 +++++++++++++++++++++ 11 files changed, 309 insertions(+), 92 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8399.rst diff --git a/doc/build/changelog/unreleased_14/8399.rst b/doc/build/changelog/unreleased_14/8399.rst new file mode 100644 index 00000000000..aea9e523816 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8399.rst @@ -0,0 +1,10 @@ +.. 
change:: + :tags: bug, orm + :tickets: 8399 + + Fixed issue involving :func:`_orm.with_loader_criteria` where a closure + variable used as bound parameter value within the lambda would not carry + forward correctly into additional relationship loaders such as + :func:`_orm.selectinload` and :func:`_orm.lazyload` after the statement + were cached, using the stale originally-cached value instead. + diff --git a/lib/sqlalchemy/orm/context.py b/lib/sqlalchemy/orm/context.py index 9d4f652ea4f..592a2c1e4df 100644 --- a/lib/sqlalchemy/orm/context.py +++ b/lib/sqlalchemy/orm/context.py @@ -106,29 +106,25 @@ def __init__( self.loaders_require_uniquing = False self.params = params - self.propagated_loader_options = { - # issue 7447. - # propagated loader options will be present on loaded InstanceState - # objects under state.load_options and are typically used by - # LazyLoader to apply options to the SELECT statement it emits. - # For compile state options (i.e. loader strategy options), these - # need to line up with the ".load_path" attribute which in - # loader.py is pulled from context.compile_state.current_path. 
- # so, this means these options have to be the ones from the - # *cached* statement that's travelling with compile_state, not the - # *current* statement which won't match up for an ad-hoc - # AliasedClass - cached_o - for cached_o in compile_state.select_statement._with_options - if cached_o.propagate_to_loaders and cached_o._is_compile_state - } | { - # for user defined loader options that are not "compile state", - # those just need to be present as they are - uncached_o - for uncached_o in statement._with_options - if uncached_o.propagate_to_loaders - and not uncached_o._is_compile_state - } + cached_options = compile_state.select_statement._with_options + uncached_options = statement._with_options + + # see issue #7447 , #8399 for some background + # propagated loader options will be present on loaded InstanceState + # objects under state.load_options and are typically used by + # LazyLoader to apply options to the SELECT statement it emits. + # For compile state options (i.e. loader strategy options), these + # need to line up with the ".load_path" attribute which in + # loader.py is pulled from context.compile_state.current_path. 
+ # so, this means these options have to be the ones from the + # *cached* statement that's travelling with compile_state, not the + # *current* statement which won't match up for an ad-hoc + # AliasedClass + self.propagated_loader_options = tuple( + opt._adapt_cached_option_to_uncached_option(self, uncached_opt) + for opt, uncached_opt in zip(cached_options, uncached_options) + if opt.propagate_to_loaders + ) self.attributes = dict(compile_state.attributes) diff --git a/lib/sqlalchemy/orm/interfaces.py b/lib/sqlalchemy/orm/interfaces.py index 63295d0b9e4..7e86326cc48 100644 --- a/lib/sqlalchemy/orm/interfaces.py +++ b/lib/sqlalchemy/orm/interfaces.py @@ -754,6 +754,46 @@ class ORMOption(ExecutableOption): _is_strategy_option = False + def _adapt_cached_option_to_uncached_option(self, context, uncached_opt): + """given "self" which is an option from a cached query, as well as the + corresponding option from the uncached version of the same query, + return the option we should use in a new query, in the context of a + loader strategy being asked to load related rows on behalf of that + cached query, which is assumed to be building a new query based on + entities passed to us from the cached query. + + Currently this routine chooses between "self" and "uncached" without + manufacturing anything new. If the option is itself a loader strategy + option which has a path, that path needs to match to the entities being + passed to us by the cached query, so the :class:`_orm.Load` subclass + overrides this to return "self". For all other options, we return the + uncached form which may have changing state, such as a + with_loader_criteria() option which will very often have new state. + + This routine could in the future involve + generating a new option based on both inputs if use cases arise, + such as if with_loader_criteria() needed to match up to + ``AliasedClass`` instances given in the parent query. 
+ + However, longer term it might be better to restructure things such that + ``AliasedClass`` entities are always matched up on their cache key, + instead of identity, in things like paths and such, so that this whole + issue of "the uncached option does not match the entities" goes away. + However this would make ``PathRegistry`` more complicated and difficult + to debug as well as potentially less performant in that it would be + hashing enormous cache keys rather than a simple AliasedInsp. UNLESS, + we could get cache keys overall to be reliably hashed into something + like an md5 key. + + .. versionadded:: 1.4.41 + + + """ + if uncached_opt is not None: + return uncached_opt + else: + return self + class CompileStateOption(HasCacheKey, ORMOption): """base for :class:`.ORMOption` classes that affect the compilation of diff --git a/lib/sqlalchemy/orm/state.py b/lib/sqlalchemy/orm/state.py index 9718024292f..b4e7076a4a8 100644 --- a/lib/sqlalchemy/orm/state.py +++ b/lib/sqlalchemy/orm/state.py @@ -70,7 +70,7 @@ class InstanceState(interfaces.InspectionAttrInfo): session_id = None key = None runid = None - load_options = util.EMPTY_SET + load_options = () load_path = PathRegistry.root insert_order = None _strong_obj = None diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index 71aae00807a..288e6e06bfc 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -981,13 +981,11 @@ def _emit_lazyload( opts += ( orm_util.LoaderCriteriaOption(self.entity, extra_criteria), ) - stmt._with_options = opts else: # this path is used if there are not already any options # in the query, but an event may want to add them effective_path = state.mapper._path_registry[self.parent_property] - stmt._compile_options += {"_current_path": effective_path} if use_get: @@ -2932,29 +2930,25 @@ def _load_for_path( # cached query, meaning it won't match on paths and loader lookups # and loaders like this one will be skipped if it is 
used in options. # - # Now we want to transfer loader options from the parent query to the - # "selectinload" query we're about to run. Which query do we transfer - # the options from? We use the cached query, because the options in - # that query will be in terms of the effective entity we were just - # handed. + # as it turns out, standard loader options like selectinload(), + # lazyload() that have a path need + # to come from the cached query so that the AliasedInsp etc. objects + # that are in the query line up with the object that's in the path + # of the strategy object. however other options like + # with_loader_criteria() that doesn't have a path (has a fixed entity) + # and needs to have access to the latest closure state in order to + # be correct, we need to use the uncached one. # - # But now the selectinload query we are running is *also* - # cached. What if it's cached and running from some previous iteration - # of that AliasedInsp? Well in that case it will also use the previous - # iteration of the loader options. If the query expires and - # gets generated again, it will be handed the current effective_entity - # and the current _with_options, again in terms of whatever - # compile_state.select_statement happens to be right now, so the - # query will still be internally consistent and loader callables - # will be correctly invoked. + # as of #8399 we let the loader option itself figure out what it + # wants to do given cached and uncached version of itself. effective_path = path[self.parent_property] if orig_query is context.query: - options = new_options = orig_query._with_options - user_defined_options = [] + new_options = orig_query._with_options else: - options = orig_query._with_options + cached_options = orig_query._with_options + uncached_options = context.query._with_options # propagate compile state options from the original query, # updating their "extra_criteria" as necessary. 
@@ -2962,20 +2956,13 @@ def _load_for_path( # "orig" options if extra_criteria is present, because the copy # of extra_criteria will have different boundparam than that of # the QueryableAttribute in the path - new_options = [ - orig_opt._adjust_for_extra_criteria(context) - if orig_opt._is_strategy_option - else orig_opt - for orig_opt in options - if orig_opt._is_compile_state or orig_opt._is_legacy_option - ] - - # propagate user defined options from the current query - user_defined_options = [ - opt - for opt in context.query._with_options - if not opt._is_compile_state and not opt._is_legacy_option + orig_opt._adapt_cached_option_to_uncached_option( + context, uncached_opt + ) + for orig_opt, uncached_opt in zip( + cached_options, uncached_options + ) ] if loadopt and loadopt._extra_criteria: @@ -2986,12 +2973,9 @@ def _load_for_path( ), ) - q = q.options(*new_options)._update_compile_options( - {"_current_path": effective_path} - ) - if user_defined_options: - q = q.options(*user_defined_options) + q = q.options(*new_options) + q = q._update_compile_options({"_current_path": effective_path}) if context.populate_existing: q = q.execution_options(populate_existing=True) diff --git a/lib/sqlalchemy/orm/strategy_options.py b/lib/sqlalchemy/orm/strategy_options.py index c3dd5df3b55..1b5e762eb27 100644 --- a/lib/sqlalchemy/orm/strategy_options.py +++ b/lib/sqlalchemy/orm/strategy_options.py @@ -115,6 +115,9 @@ def for_existing_path(cls, path): load._extra_criteria = () return load + def _adapt_cached_option_to_uncached_option(self, context, uncached_opt): + return self._adjust_for_extra_criteria(context) + def _generate_extra_criteria(self, context): """Apply the current bound parameters in a QueryContext to the immediate "extra_criteria" stored with this Load object. 
diff --git a/test/orm/inheritance/test_poly_loading.py b/test/orm/inheritance/test_poly_loading.py index 1e3b15575d7..31e5e4ca906 100644 --- a/test/orm/inheritance/test_poly_loading.py +++ b/test/orm/inheritance/test_poly_loading.py @@ -24,6 +24,7 @@ from sqlalchemy.testing import assertsql from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures +from sqlalchemy.testing import mock from sqlalchemy.testing.assertions import expect_raises_message from sqlalchemy.testing.assertsql import AllOf from sqlalchemy.testing.assertsql import CompiledSQL @@ -963,15 +964,23 @@ class AnyOpt(CompileStateOption): _cache_key_traversal = () propagate_to_loaders = True - any_opt = AnyOpt() - if strat is None: - opts = (any_opt,) - else: - opts = (strat(User.address), any_opt) + def _adjust_for_extra_criteria(self, context): + return self + + from sqlalchemy.orm.strategy_options import Load + + with mock.patch.object( + Load, "_adjust_for_extra_criteria", lambda self, ctx: self + ): + any_opt = AnyOpt() + if strat is None: + opts = (any_opt,) + else: + opts = (strat(User.address), any_opt) - u = sess.execute(select(User).options(*opts)).scalars().one() - address = u.address - eq_(inspect(address).load_options, set(opts)) + u = sess.execute(select(User).options(*opts)).scalars().one() + address = u.address + eq_(inspect(address).load_options, opts) class NoBaseWPPlusAliasedTest( diff --git a/test/orm/test_events.py b/test/orm/test_events.py index c0fbaba7d6f..4009dc3aecb 100644 --- a/test/orm/test_events.py +++ b/test/orm/test_events.py @@ -201,7 +201,7 @@ def _gen_cache_key(self, anon_map, bindparams): def go(context): for elem in context.user_defined_options: if isinstance(elem, SetShardOption): - m1.update_execution_options(_sa_shard_id=elem.payload) + m1.do_some_mock_thing(_sa_shard_id=elem.payload) stmt = select(User).options( loader_opt(User.addresses).options(loader_opt(Address.dingaling)), @@ -217,21 +217,15 @@ def go(context): 
loader_opt(User.addresses).options(loader_opt(Address.dingaling)), SetShardOption("some_other_shard"), ) + for u in s.execute(stmt).unique().scalars(): for a in u.addresses: a.dingaling eq_( m1.mock_calls, - ( - [call.update_execution_options(_sa_shard_id="some_shard")] - * num_opts - ) + ([call.do_some_mock_thing(_sa_shard_id="some_shard")] * num_opts) + ( - [ - call.update_execution_options( - _sa_shard_id="some_other_shard" - ) - ] + [call.do_some_mock_thing(_sa_shard_id="some_other_shard")] * num_opts ), ) diff --git a/test/orm/test_merge.py b/test/orm/test_merge.py index d2eade0ea1a..3e29d5cd796 100644 --- a/test/orm/test_merge.py +++ b/test/orm/test_merge.py @@ -1567,7 +1567,7 @@ class Option(MapperOption): for u in s1_users: ustate = attributes.instance_state(u) eq_(ustate.load_path.path, (umapper,)) - eq_(ustate.load_options, set()) + eq_(ustate.load_options, ()) for u in s2_users: sess.merge(u) @@ -1575,7 +1575,7 @@ class Option(MapperOption): for u in s1_users: ustate = attributes.instance_state(u) eq_(ustate.load_path.path, (umapper,)) - eq_(ustate.load_options, set([opt2])) + eq_(ustate.load_options, (opt2,)) # test 2. 
present options are replaced by merge options sess = fixture_session() @@ -1583,7 +1583,7 @@ class Option(MapperOption): for u in s1_users: ustate = attributes.instance_state(u) eq_(ustate.load_path.path, (umapper,)) - eq_(ustate.load_options, set([opt1])) + eq_(ustate.load_options, (opt1,)) for u in s2_users: sess.merge(u) @@ -1591,7 +1591,7 @@ class Option(MapperOption): for u in s1_users: ustate = attributes.instance_state(u) eq_(ustate.load_path.path, (umapper,)) - eq_(ustate.load_options, set([opt2])) + eq_(ustate.load_options, (opt2,)) def test_resolve_conflicts_pending_doesnt_interfere_no_ident(self): User, Address, Order = ( diff --git a/test/orm/test_options.py b/test/orm/test_options.py index 1a2a5ba70f9..840b3dc2148 100644 --- a/test/orm/test_options.py +++ b/test/orm/test_options.py @@ -23,6 +23,7 @@ from sqlalchemy.orm import util as orm_util from sqlalchemy.orm import with_polymorphic from sqlalchemy.testing import fixtures +from sqlalchemy.testing import mock from sqlalchemy.testing.assertions import assert_raises_message from sqlalchemy.testing.assertions import AssertsCompiledSQL from sqlalchemy.testing.assertions import emits_warning @@ -2050,12 +2051,16 @@ def test_option_propagate(self): oalias = aliased(Order) opt1 = sa.orm.joinedload(User.orders, Order.items) opt2 = sa.orm.contains_eager(User.orders, Order.items, alias=oalias) - u1 = ( - sess.query(User) - .join(oalias, User.orders) - .options(opt1, opt2) - .first() - ) - ustate = attributes.instance_state(u1) - assert opt1 in ustate.load_options - assert opt2 not in ustate.load_options + + with mock.patch.object( + Load, "_adjust_for_extra_criteria", lambda self, ctx: self + ): + u1 = ( + sess.query(User) + .join(oalias, User.orders) + .options(opt1, opt2) + .first() + ) + ustate = attributes.instance_state(u1) + assert opt1 in ustate.load_options + assert opt2 not in ustate.load_options diff --git a/test/orm/test_relationship_criteria.py b/test/orm/test_relationship_criteria.py index 
5f47b49ac7a..7a347cd55b9 100644 --- a/test/orm/test_relationship_criteria.py +++ b/test/orm/test_relationship_criteria.py @@ -481,6 +481,63 @@ def test_select_selectinload_mapper_mapper_criteria( ), ) + def test_select_selectinload_mapper_mapper_closure_criteria( + self, user_address_fixture + ): + User, Address = user_address_fixture + + def get_statement(closure="name"): + + stmt = select(User).options( + selectinload(User.addresses), + with_loader_criteria( + Address, lambda cls: cls.email_address != closure + ), + ) + return stmt + + s = Session(testing.db, future=True) + + stmt = get_statement(closure="name") + with self.sql_execution_asserter() as asserter: + s.execute(stmt).all() + + asserter.assert_( + CompiledSQL( + "SELECT users.id, users.name FROM users", + [], + ), + CompiledSQL( + "SELECT addresses.user_id AS addresses_user_id, addresses.id " + "AS addresses_id, addresses.email_address " + "AS addresses_email_address FROM addresses " + "WHERE addresses.user_id IN (__[POSTCOMPILE_primary_keys]) " + "AND addresses.email_address != :closure_1 " + "ORDER BY addresses.id", + [{"primary_keys": [7, 8, 9, 10], "closure_1": "name"}], + ), + ) + + stmt = get_statement(closure="new name") + with self.sql_execution_asserter() as asserter: + s.execute(stmt).all() + + asserter.assert_( + CompiledSQL( + "SELECT users.id, users.name FROM users", + [], + ), + CompiledSQL( + "SELECT addresses.user_id AS addresses_user_id, addresses.id " + "AS addresses_id, addresses.email_address " + "AS addresses_email_address FROM addresses " + "WHERE addresses.user_id IN (__[POSTCOMPILE_primary_keys]) " + "AND addresses.email_address != :closure_1 " + "ORDER BY addresses.id", + [{"primary_keys": [7, 8, 9, 10], "closure_1": "new name"}], + ), + ) + def test_select_lazyload_mapper_mapper_criteria( self, user_address_fixture ): @@ -543,6 +600,125 @@ def test_select_lazyload_mapper_mapper_criteria( ), ) + def test_select_lazyload_mapper_mapper_closure_criteria( + self, 
user_address_fixture + ): + User, Address = user_address_fixture + + def get_statement(closure="name"): + + stmt = ( + select(User) + .options( + lazyload(User.addresses), + with_loader_criteria( + Address, lambda cls: cls.email_address != closure + ), + ) + .order_by(User.id) + ) + return stmt + + s = Session(testing.db, future=True) + + stmt = get_statement(closure="name") + with self.sql_execution_asserter() as asserter: + for obj in s.scalars(stmt).all(): + obj.addresses + + asserter.assert_( + CompiledSQL( + "SELECT users.id, users.name FROM users ORDER BY users.id", + [], + ), + CompiledSQL( + "SELECT addresses.id AS addresses_id, " + "addresses.user_id AS addresses_user_id, " + "addresses.email_address AS addresses_email_address " + "FROM addresses WHERE :param_1 = addresses.user_id " + "AND addresses.email_address != :closure_1 " + "ORDER BY addresses.id", + [{"param_1": 7, "closure_1": "name"}], + ), + CompiledSQL( + "SELECT addresses.id AS addresses_id, " + "addresses.user_id AS addresses_user_id, " + "addresses.email_address AS addresses_email_address " + "FROM addresses WHERE :param_1 = addresses.user_id " + "AND addresses.email_address != :closure_1 " + "ORDER BY addresses.id", + [{"param_1": 8, "closure_1": "name"}], + ), + CompiledSQL( + "SELECT addresses.id AS addresses_id, " + "addresses.user_id AS addresses_user_id, " + "addresses.email_address AS addresses_email_address " + "FROM addresses WHERE :param_1 = addresses.user_id " + "AND addresses.email_address != :closure_1 " + "ORDER BY addresses.id", + [{"param_1": 9, "closure_1": "name"}], + ), + CompiledSQL( + "SELECT addresses.id AS addresses_id, " + "addresses.user_id AS addresses_user_id, " + "addresses.email_address AS addresses_email_address " + "FROM addresses WHERE :param_1 = addresses.user_id " + "AND addresses.email_address != :closure_1 " + "ORDER BY addresses.id", + [{"param_1": 10, "closure_1": "name"}], + ), + ) + + stmt = get_statement(closure="new name") + with 
self.sql_execution_asserter() as asserter: + for obj in s.scalars( + stmt, execution_options={"populate_existing": True} + ).all(): + obj.addresses + + asserter.assert_( + CompiledSQL( + "SELECT users.id, users.name FROM users ORDER BY users.id", + [], + ), + CompiledSQL( + "SELECT addresses.id AS addresses_id, " + "addresses.user_id AS addresses_user_id, " + "addresses.email_address AS addresses_email_address " + "FROM addresses WHERE :param_1 = addresses.user_id " + "AND addresses.email_address != :closure_1 " + "ORDER BY addresses.id", + [{"param_1": 7, "closure_1": "new name"}], + ), + CompiledSQL( + "SELECT addresses.id AS addresses_id, " + "addresses.user_id AS addresses_user_id, " + "addresses.email_address AS addresses_email_address " + "FROM addresses WHERE :param_1 = addresses.user_id " + "AND addresses.email_address != :closure_1 " + "ORDER BY addresses.id", + [{"param_1": 8, "closure_1": "new name"}], + ), + CompiledSQL( + "SELECT addresses.id AS addresses_id, " + "addresses.user_id AS addresses_user_id, " + "addresses.email_address AS addresses_email_address " + "FROM addresses WHERE :param_1 = addresses.user_id " + "AND addresses.email_address != :closure_1 " + "ORDER BY addresses.id", + [{"param_1": 9, "closure_1": "new name"}], + ), + CompiledSQL( + "SELECT addresses.id AS addresses_id, " + "addresses.user_id AS addresses_user_id, " + "addresses.email_address AS addresses_email_address " + "FROM addresses WHERE :param_1 = addresses.user_id " + "AND addresses.email_address != :closure_1 " + "ORDER BY addresses.id", + [{"param_1": 10, "closure_1": "new name"}], + ), + ) + def test_select_aliased_inclaliased_criteria(self, user_address_fixture): User, Address = user_address_fixture From 9cd7cca2c70e87c852af7e570aabdfa7463ce645 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 17 Aug 2022 13:06:51 -0400 Subject: [PATCH 341/632] fill out all distinguising fields for AliasedInsp Hardened the cache key strategy for the :func:`_orm.aliased` and 
:func:`_orm.with_polymorphic` constructs. While no issue involving actual statements being cached can easily be demonstrated (if at all), these two constructs were not including enough of what makes them unique in their cache keys for caching on the aliased construct alone to be accurate. Fixes: #8401 Change-Id: I13d14985b6965f398edd9494601d8ae89ac641f2 (cherry picked from commit a58f1b9c698dc7be29d43f2c4c21de8918943f77) --- doc/build/changelog/unreleased_14/8401.rst | 9 ++ lib/sqlalchemy/orm/util.py | 19 ++-- test/orm/test_cache_key.py | 102 ++++++++++++++++++++- test/sql/test_resultset.py | 1 + 4 files changed, 124 insertions(+), 7 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8401.rst diff --git a/doc/build/changelog/unreleased_14/8401.rst b/doc/build/changelog/unreleased_14/8401.rst new file mode 100644 index 00000000000..119c6cff1a0 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8401.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: orm, bug + :tickets: 8401 + + Hardened the cache key strategy for the :func:`_orm.aliased` and + :func:`_orm.with_polymorphic` constructs. While no issue involving actual + statements being cached can easily be demonstrated (if at all), these two + constructs were not including enough of what makes them unique in their + cache keys for caching on the aliased construct alone to be accurate. 
diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index 56aa9ff6c74..6f3278ed789 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -650,6 +650,19 @@ class AliasedInsp( """ + _cache_key_traversal = [ + ("name", visitors.ExtendedInternalTraversal.dp_string), + ("_adapt_on_names", visitors.ExtendedInternalTraversal.dp_boolean), + ("_use_mapper_path", visitors.ExtendedInternalTraversal.dp_boolean), + ("_target", visitors.ExtendedInternalTraversal.dp_inspectable), + ("selectable", visitors.ExtendedInternalTraversal.dp_clauseelement), + ( + "with_polymorphic_mappers", + visitors.InternalTraversal.dp_has_cache_key_list, + ), + ("polymorphic_on", visitors.InternalTraversal.dp_clauseelement), + ] + def __init__( self, entity, @@ -756,12 +769,6 @@ def __clause_element__(self): def entity_namespace(self): return self.entity - _cache_key_traversal = [ - ("name", visitors.ExtendedInternalTraversal.dp_string), - ("_adapt_on_names", visitors.ExtendedInternalTraversal.dp_boolean), - ("selectable", visitors.ExtendedInternalTraversal.dp_clauseelement), - ] - @property def class_(self): """Return the mapped class ultimately represented by this diff --git a/test/orm/test_cache_key.py b/test/orm/test_cache_key.py index f42e59216a0..daf963952c8 100644 --- a/test/orm/test_cache_key.py +++ b/test/orm/test_cache_key.py @@ -5,6 +5,7 @@ from sqlalchemy import func from sqlalchemy import inspect from sqlalchemy import Integer +from sqlalchemy import literal_column from sqlalchemy import null from sqlalchemy import select from sqlalchemy import Table @@ -63,8 +64,19 @@ def setup_mappers(cls): def test_mapper_and_aliased(self): User, Address, Keyword = self.classes("User", "Address", "Keyword") + addresses_table = self.tables.addresses + self._run_cache_key_fixture( - lambda: (inspect(User), inspect(Address), inspect(aliased(User))), + lambda: ( + inspect(User), + inspect(Address), + inspect(aliased(User)), + inspect(aliased(aliased(User, 
addresses_table))), + inspect(aliased(aliased(User), addresses_table.select())), + inspect(aliased(Address)), + inspect(aliased(Address, addresses_table.select())), + inspect(aliased(User, addresses_table.select())), + ), compare_values=True, ) @@ -606,6 +618,94 @@ def test_wp_objects(self): compare_values=True, ) + def test_wpoly_cache_keys(self): + Person, Manager, Engineer, Boss = self.classes( + "Person", "Manager", "Engineer", "Boss" + ) + + meb_stmt = inspect( + with_polymorphic(Person, [Manager, Engineer, Boss]) + ).selectable + me_stmt = inspect( + with_polymorphic(Person, [Manager, Engineer]) + ).selectable + + self._run_cache_key_fixture( + lambda: ( + inspect(Person), + inspect( + aliased(Person, me_stmt), + ), + inspect( + aliased(Person, meb_stmt), + ), + inspect( + with_polymorphic(Person, [Manager, Engineer]), + ), + # aliased=True is the same as flat=True for default selectable + inspect( + with_polymorphic( + Person, [Manager, Engineer], aliased=True + ), + ), + inspect( + with_polymorphic(Person, [Manager, Engineer], flat=True), + ), + inspect( + with_polymorphic( + Person, [Manager, Engineer], flat=True, innerjoin=True + ), + ), + inspect( + with_polymorphic( + Person, + [Manager, Engineer], + flat=True, + _use_mapper_path=True, + ), + ), + inspect( + with_polymorphic( + Person, + [Manager, Engineer], + flat=True, + adapt_on_names=True, + ), + ), + inspect( + with_polymorphic( + Person, [Manager, Engineer], selectable=meb_stmt + ), + ), + inspect( + with_polymorphic( + Person, + [Manager, Engineer], + selectable=meb_stmt, + aliased=True, + ), + ), + inspect( + with_polymorphic(Person, [Manager, Engineer, Boss]), + ), + inspect( + with_polymorphic( + Person, + [Manager, Engineer, Boss], + polymorphic_on=literal_column("foo"), + ), + ), + inspect( + with_polymorphic( + Person, + [Manager, Engineer, Boss], + polymorphic_on=literal_column("bar"), + ), + ), + ), + compare_values=True, + ) + def test_wp_queries(self): Person, Manager, Engineer, Boss = 
self.classes( "Person", "Manager", "Engineer", "Boss" diff --git a/test/sql/test_resultset.py b/test/sql/test_resultset.py index 13ffc5eebdf..13190f915f9 100644 --- a/test/sql/test_resultset.py +++ b/test/sql/test_resultset.py @@ -732,6 +732,7 @@ def test_column_accessor_err(self, connection): lambda: r._mapping["foo"], ) + @testing.skip_if("+aiosqlite", "unknown issue") @testing.combinations( (True,), (False,), From 51ff158fe78d26433c420143c0f291ca56dac590 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 18 Aug 2022 11:29:25 -0400 Subject: [PATCH 342/632] remove nonsensical note this note is not factually incorrect but it makes no sense why this would be in the middle of the abstract concrete doc Change-Id: Icd67d9d9a93ee72714bf85d8d5ca39c0af61f356 (cherry picked from commit acf14885833da238606e6a0df7c5ab256e477f2c) --- doc/build/orm/inheritance.rst | 8 -------- 1 file changed, 8 deletions(-) diff --git a/doc/build/orm/inheritance.rst b/doc/build/orm/inheritance.rst index e9b1998ee5b..27498b375af 100644 --- a/doc/build/orm/inheritance.rst +++ b/doc/build/orm/inheritance.rst @@ -770,14 +770,6 @@ of ``Manager`` and ``Engineer`` instances. This brings us back into the domain of concrete inheritance, and we must build a special mapper against ``Employee`` in order to achieve this. -.. topic:: Mappers can always SELECT - - In SQLAlchemy, a mapper for a class always has to refer to some - "selectable", which is normally a :class:`_schema.Table` but may also refer to any - :func:`_expression.select` object as well. While it may appear that a "single table - inheritance" mapper does not map to a table, these mappers in fact - implicitly refer to the table that is mapped by a superclass. 
- To modify our concrete inheritance example to illustrate an "abstract" base that is capable of polymorphic loading, we will have only an ``engineer`` and a ``manager`` table and no ``employee`` From c6140f34ef022ce87d61d0c0ad55c6c516393a73 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 18 Aug 2022 11:54:04 -0400 Subject: [PATCH 343/632] more abstractconcretebase clarity Change-Id: I9ddb6b1a2e0c0be1fe355a7ea714d0e16aa93b47 (cherry picked from commit a47d76ca25275344345b208def5f72292e8687b4) --- doc/build/orm/inheritance.rst | 36 +++++++++++----- lib/sqlalchemy/ext/declarative/extensions.py | 44 ++++++++++---------- 2 files changed, 48 insertions(+), 32 deletions(-) diff --git a/doc/build/orm/inheritance.rst b/doc/build/orm/inheritance.rst index 27498b375af..4fd3569be5b 100644 --- a/doc/build/orm/inheritance.rst +++ b/doc/build/orm/inheritance.rst @@ -637,7 +637,9 @@ Using :class:`.ConcreteBase`, we can set up our concrete mapping in almost the same way as we do other forms of inheritance mappings:: from sqlalchemy.ext.declarative import ConcreteBase + from sqlalchemy.orm import declarative_base + Base = declarative_base() class Employee(ConcreteBase, Base): __tablename__ = "employee" @@ -723,6 +725,12 @@ The above UNION query needs to manufacture "NULL" columns for each subtable in order to accommodate for those columns that aren't members of that particular subclass. +.. seealso:: + + :class:`.ConcreteBase` + +.. _abstract_concrete_base: + Abstract Concrete Classes +++++++++++++++++++++++++ @@ -737,6 +745,10 @@ tables, and leave the base class unmapped, this can be achieved very easily. 
When using Declarative, just declare the base class with the ``__abstract__`` indicator:: + from sqlalchemy.orm import declarative_base + + Base = declarative_base() + class Employee(Base): __abstract__ = True @@ -747,10 +759,6 @@ base class with the ``__abstract__`` indicator:: name = Column(String(50)) manager_data = Column(String(40)) - __mapper_args__ = { - "polymorphic_identity": "manager", - } - class Engineer(Employee): __tablename__ = "engineer" @@ -758,10 +766,6 @@ base class with the ``__abstract__`` indicator:: name = Column(String(50)) engineer_info = Column(String(40)) - __mapper_args__ = { - "polymorphic_identity": "engineer", - } - Above, we are not actually making use of SQLAlchemy's inheritance mapping facilities; we can load and persist instances of ``Manager`` and ``Engineer`` normally. The situation changes however when we need to **query polymorphically**, @@ -781,6 +785,9 @@ To help with this, Declarative offers a variant of the :class:`.ConcreteBase` class called :class:`.AbstractConcreteBase` which achieves this automatically:: from sqlalchemy.ext.declarative import AbstractConcreteBase + from sqlalchemy.orm import declarative_base + + Base = declarative_base() class Employee(AbstractConcreteBase, Base): @@ -810,13 +817,22 @@ class called :class:`.AbstractConcreteBase` which achieves this automatically:: "concrete": True, } -The :class:`.AbstractConcreteBase` helper class has a more complex internal -process than that of :class:`.ConcreteBase`, in that the entire mapping + Base.registry.configure() + +Above, the :meth:`_orm.registry.configure` method is invoked, which will +trigger the ``Employee`` class to be actually mapped; before the configuration +step, the class has no mapping as the sub-tables which it will query from +have not yet been defined. This process is more complex than that of +:class:`.ConcreteBase`, in that the entire mapping of the base class must be delayed until all the subclasses have been declared. 
With a mapping like the above, only instances of ``Manager`` and ``Engineer`` may be persisted; querying against the ``Employee`` class will always produce ``Manager`` and ``Engineer`` objects. +.. seealso:: + + :class:`.AbstractConcreteBase` + Classical and Semi-Classical Concrete Polymorphic Configuration +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ diff --git a/lib/sqlalchemy/ext/declarative/extensions.py b/lib/sqlalchemy/ext/declarative/extensions.py index 78188416aa3..4f60d8bf282 100644 --- a/lib/sqlalchemy/ext/declarative/extensions.py +++ b/lib/sqlalchemy/ext/declarative/extensions.py @@ -144,8 +144,8 @@ class AbstractConcreteBase(ConcreteBase): .. note:: - The :class:`.AbstractConcreteBase` class does not intend to set up the - mapping for the base class until all the subclasses have been defined, + The :class:`.AbstractConcreteBase` delays the mapper creation of the + base class until all the subclasses have been defined, as it needs to create a mapping against a selectable that will include all subclass tables. In order to achieve this, it waits for the **mapper configuration event** to occur, at which point it scans @@ -155,22 +155,20 @@ class AbstractConcreteBase(ConcreteBase): While this event is normally invoked automatically, in the case of :class:`.AbstractConcreteBase`, it may be necessary to invoke it explicitly after **all** subclass mappings are defined, if the first - operation is to be a query against this base class. To do so, invoke - :func:`.configure_mappers` once all the desired classes have been - configured:: - - from sqlalchemy.orm import configure_mappers - - configure_mappers() - - .. seealso:: - - :func:`_orm.configure_mappers` + operation is to be a query against this base class. 
To do so, once all + the desired classes have been configured, the + :meth:`_orm.registry.configure` method on the :class:`_orm.registry` + in use can be invoked, which is available in relation to a particular + declarative base class:: + Base.registry.configure() Example:: from sqlalchemy.ext.declarative import AbstractConcreteBase + from sqlalchemy.orm import declarative_base + + Base = declarative_base() class Employee(AbstractConcreteBase, Base): pass @@ -183,9 +181,10 @@ class Manager(Employee): __mapper_args__ = { 'polymorphic_identity':'manager', - 'concrete':True} + 'concrete':True + } - configure_mappers() + Base.registry.configure() The abstract base class is handled by declarative in a special way; at class configuration time, it behaves like a declarative mixin @@ -221,18 +220,17 @@ class Manager(Employee): __mapper_args__ = { 'polymorphic_identity':'manager', - 'concrete':True} + 'concrete':True + } - configure_mappers() + Base.registry.configure() When we make use of our mappings however, both ``Manager`` and ``Employee`` will have an independently usable ``.company`` attribute:: - session.query(Employee).filter(Employee.company.has(id=5)) - - .. versionchanged:: 1.0.0 - The mechanics of :class:`.AbstractConcreteBase` - have been reworked to support relationships established directly - on the abstract base, without any special configurational steps. + session.execute( + select(Employee).filter(Employee.company.has(id=5)) + ) .. 
seealso:: @@ -240,6 +238,8 @@ class Manager(Employee): :ref:`concrete_inheritance` + :ref:`abstract_concrete_base` + """ __no_table__ = True From 8237ba234d672c264ed12924c06efd5351903ab4 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 18 Aug 2022 13:56:50 -0400 Subject: [PATCH 344/632] set old declarative docs as orphan these otherwise show up in the sidebar where they are confusing Change-Id: Ic7bdcd31207a135e2805241928ca2379a8189565 (cherry picked from commit e2e412f6a3354682ab792f3f9eefc537207a44b4) --- doc/build/orm/extensions/declarative/api.rst | 2 ++ doc/build/orm/extensions/declarative/basic_use.rst | 8 +++----- doc/build/orm/extensions/declarative/index.rst | 12 ------------ doc/build/orm/extensions/declarative/inheritance.rst | 2 ++ doc/build/orm/extensions/declarative/mixins.rst | 2 ++ .../orm/extensions/declarative/relationships.rst | 2 ++ .../orm/extensions/declarative/table_config.rst | 2 ++ 7 files changed, 13 insertions(+), 17 deletions(-) diff --git a/doc/build/orm/extensions/declarative/api.rst b/doc/build/orm/extensions/declarative/api.rst index e41e735d37b..98924c2e275 100644 --- a/doc/build/orm/extensions/declarative/api.rst +++ b/doc/build/orm/extensions/declarative/api.rst @@ -1,3 +1,5 @@ +:orphan: + .. automodule:: sqlalchemy.ext.declarative =============== diff --git a/doc/build/orm/extensions/declarative/basic_use.rst b/doc/build/orm/extensions/declarative/basic_use.rst index f1ce1d4a026..49903559d5c 100644 --- a/doc/build/orm/extensions/declarative/basic_use.rst +++ b/doc/build/orm/extensions/declarative/basic_use.rst @@ -1,3 +1,5 @@ +:orphan: + ========= Basic Use ========= @@ -20,11 +22,7 @@ This section has moved to :ref:`orm_declarative_metadata`. 
Class Constructor ================= -As a convenience feature, the :func:`declarative_base` sets a default -constructor on classes which takes keyword arguments, and assigns them -to the named attributes:: - - e = Engineer(primary_language='python') +This section has moved to :ref:`orm_mapper_configuration_overview`. Mapper Configuration ==================== diff --git a/doc/build/orm/extensions/declarative/index.rst b/doc/build/orm/extensions/declarative/index.rst index 7ef2551c619..6cf1a60a1c6 100644 --- a/doc/build/orm/extensions/declarative/index.rst +++ b/doc/build/orm/extensions/declarative/index.rst @@ -22,15 +22,3 @@ mapping API. .. autoclass:: DeferredReflection :members: -.. these pages have all been integrated into the main ORM documentation - however are still here as placeholder docs with links to where they've moved - -.. toctree:: - :hidden: - - api - basic_use - inheritance - mixins - relationships - table_config \ No newline at end of file diff --git a/doc/build/orm/extensions/declarative/inheritance.rst b/doc/build/orm/extensions/declarative/inheritance.rst index d65cafd355a..849664a3c33 100644 --- a/doc/build/orm/extensions/declarative/inheritance.rst +++ b/doc/build/orm/extensions/declarative/inheritance.rst @@ -1,3 +1,5 @@ +:orphan: + .. _declarative_inheritance: Declarative Inheritance diff --git a/doc/build/orm/extensions/declarative/mixins.rst b/doc/build/orm/extensions/declarative/mixins.rst index 221e8f8f8c9..cde4c12bd16 100644 --- a/doc/build/orm/extensions/declarative/mixins.rst +++ b/doc/build/orm/extensions/declarative/mixins.rst @@ -1,3 +1,5 @@ +:orphan: + .. _declarative_mixins: Mixin and Custom Base Classes diff --git a/doc/build/orm/extensions/declarative/relationships.rst b/doc/build/orm/extensions/declarative/relationships.rst index c5c83b1711c..c0df8b49cff 100644 --- a/doc/build/orm/extensions/declarative/relationships.rst +++ b/doc/build/orm/extensions/declarative/relationships.rst @@ -1,3 +1,5 @@ +:orphan: + .. 
_declarative_configuring_relationships: ========================= diff --git a/doc/build/orm/extensions/declarative/table_config.rst b/doc/build/orm/extensions/declarative/table_config.rst index d51fb1831d7..05ad46d6ccc 100644 --- a/doc/build/orm/extensions/declarative/table_config.rst +++ b/doc/build/orm/extensions/declarative/table_config.rst @@ -1,3 +1,5 @@ +:orphan: + .. _declarative_table_args: =================== From c0c5709b43376ebfa2179edf0f9e7af61354c360 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 19 Aug 2022 09:49:25 -0400 Subject: [PATCH 345/632] fix some inaccuracies in with_variant doc * the table wont create on mysql/mariadb b.c. user_name had no length * "utf-8" is not recognized by mysql/mariadb, use "utf8" * mysql or mariadb name match is determined by the URL, not the actual DB that is detected (I know I made it work that way but I forgot) * for the 1.4 backport only, will remove the "mariadb" part as we dont support that API, #8408 Fixes: #8408 Change-Id: I5b0a58a3f94a3450631e2405bd07d0a77599ae26 (cherry picked from commit 7f99c4ab55a80ee428b3466e9fa476d6ea03bfaf) --- doc/build/core/type_basics.rst | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/doc/build/core/type_basics.rst b/doc/build/core/type_basics.rst index eb6c8791258..49fc715f06f 100644 --- a/doc/build/core/type_basics.rst +++ b/doc/build/core/type_basics.rst @@ -139,7 +139,7 @@ makes use of the :meth:`_types.TypeEngine.with_variant` method in order to Such as, to use the :class:`_types.String` datatype, but when running on MySQL to make use of the :paramref:`_mysql.VARCHAR.charset` parameter of -:class:`_mysql.VARCHAR` when the table is created on MySQL or MariaDB, +:class:`_mysql.VARCHAR` when the table is created on MySQL, :meth:`_types.TypeEngine.with_variant` may be used as below:: from sqlalchemy import MetaData @@ -151,16 +151,17 @@ to make use of the :paramref:`_mysql.VARCHAR.charset` parameter of user = Table( "user", metadata_obj, - 
Column("user_name", String, primary_key=True), + Column("user_name", String(100), primary_key=True), Column( "bio", - String(255).with_variant(VARCHAR(255, charset="utf-8"), "mysql", "mariadb"), + String(255).with_variant(VARCHAR(255, charset="utf8"), "mysql"), ), ) In the above table definition, the ``"bio"`` column will have string-behaviors -on all backends. On most backends it will render in DDL as ``VARCHAR``. -However on MySQL and MariaDB, it will render as ``VARCHAR(255) CHARACTER SET utf-8``. +on all backends. On most backends it will render in DDL as ``VARCHAR``. However +on MySQL (indicated by database URLs that start with ``mysql``), it will +render as ``VARCHAR(255) CHARACTER SET utf8``. .. seealso:: From f91058cf971d873632181d57790e5b719d847a89 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 19 Aug 2022 10:16:34 -0400 Subject: [PATCH 346/632] remove notes re: current pymssql DBAPI features Fixes: #8337 Change-Id: Ib0c107bb386489dcb6d1683f29d0a9574dd96f1e (cherry picked from commit 319e5a3d8e92ccc97faeeaecd55313131e5a611b) --- lib/sqlalchemy/dialects/mssql/pymssql.py | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/lib/sqlalchemy/dialects/mssql/pymssql.py b/lib/sqlalchemy/dialects/mssql/pymssql.py index 84c5fed6f50..c4ec66d1176 100644 --- a/lib/sqlalchemy/dialects/mssql/pymssql.py +++ b/lib/sqlalchemy/dialects/mssql/pymssql.py @@ -19,24 +19,6 @@ pymssql is currently not included in SQLAlchemy's continuous integration (CI) testing. -Modern versions of this driver worked very well with SQL Server and FreeTDS -from Linux and were highly recommended. However, pymssql is currently -unmaintained and has fallen behind the progress of the Microsoft ODBC driver in -its support for newer features of SQL Server. The latest official release of -pymssql at the time of this document is version 2.1.4 (August, 2018) and it -lacks support for: - -1. table-valued parameters (TVPs), -2. 
``datetimeoffset`` columns using timezone-aware ``datetime`` objects - (values are sent and retrieved as strings), and -3. encrypted connections (e.g., to Azure SQL), when pymssql is installed from - the pre-built wheels. Support for encrypted connections requires building - pymssql from source, which can be a nuisance, especially under Windows. - -The above features are all supported by mssql+pyodbc when using Microsoft's -ODBC Driver for SQL Server (msodbcsql), which is now available for Windows, -(several flavors of) Linux, and macOS. - """ # noqa import re From 9fe5d4c055c24ab17810b981ed404b94016b8ce5 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 19 Aug 2022 11:12:41 -0400 Subject: [PATCH 347/632] remove narrative "reconstructor" document this event hook is not commonly used and this page does not fit into the current narrative very well. We should possibly write a new paragraph regarding how instances load at some point though the best place to put it is not clear. (cherry picked from commit e7b2055866f315a77e1e19a832a5afdae90bfd9f) --- doc/build/orm/constructors.rst | 59 +++---------------------------- doc/build/orm/loading_objects.rst | 1 - doc/build/orm/mapping_api.rst | 2 ++ doc/build/orm/mapping_styles.rst | 2 ++ lib/sqlalchemy/orm/mapper.py | 8 +++-- 5 files changed, 14 insertions(+), 58 deletions(-) diff --git a/doc/build/orm/constructors.rst b/doc/build/orm/constructors.rst index f03ce3a1a38..50ae218c2fe 100644 --- a/doc/build/orm/constructors.rst +++ b/doc/build/orm/constructors.rst @@ -1,3 +1,5 @@ +:orphan: + .. currentmodule:: sqlalchemy.orm .. _mapping_constructors: @@ -5,59 +7,6 @@ Constructors and Object Initialization ====================================== -Mapping imposes no restrictions or requirements on the constructor -(``__init__``) method for the class. 
You are free to require any arguments for -the function that you wish, assign attributes to the instance that are unknown -to the ORM, and generally do anything else you would normally do when writing -a constructor for a Python class. - -The SQLAlchemy ORM does not call ``__init__`` when recreating objects from -database rows. The ORM's process is somewhat akin to the Python standard -library's ``pickle`` module, invoking the low level ``__new__`` method and -then quietly restoring attributes directly on the instance rather than calling -``__init__``. - -If you need to do some setup on database-loaded instances before they're ready -to use, there is an event hook known as :meth:`.InstanceEvents.load` which -can achieve this; it is also available via a class-specific decorator called -:func:`_orm.reconstructor`. When using :func:`_orm.reconstructor`, -the mapper will invoke a single decorated method with no -arguments every time it loads or reconstructs an instance of the -class. This is -useful for recreating transient properties that are normally assigned in -``__init__``:: - - from sqlalchemy import orm - - - class MyMappedClass: - def __init__(self, data): - self.data = data - # we need stuff on all instances, but not in the database. - self.stuff = [] - - @orm.reconstructor - def init_on_load(self): - self.stuff = [] - -Above, when ``obj = MyMappedClass()`` is executed, the ``__init__`` constructor -is invoked normally and the ``data`` argument is required. When instances are -loaded during a :class:`~sqlalchemy.orm.query.Query` operation as in -``query(MyMappedClass).one()``, ``init_on_load`` is called. - -Any method may be tagged as the :func:`_orm.reconstructor`, even -the ``__init__`` method itself, but only one method may be tagged as such. It is invoked after all immediate -column-level attributes are loaded as well as after eagerly-loaded scalar -relationships. 
Eagerly loaded collections may be only partially populated -or not populated at all, depending on the kind of eager loading used. - -ORM state changes made to objects at this stage will not be recorded for the -next flush operation, so the activity within a reconstructor should be -conservative. - -:func:`_orm.reconstructor` is a shortcut into a larger system -of "instance level" events, which can be subscribed to using the -event API - see :class:`.InstanceEvents` for the full API description -of these events. +This document has been removed. See :ref:`orm_mapped_class_behavior` +as well as :meth:`_orm.InstanceEvents.load` for what was covered here. -.. autofunction:: reconstructor diff --git a/doc/build/orm/loading_objects.rst b/doc/build/orm/loading_objects.rst index 956ef2f6995..3f6c84bf1d2 100644 --- a/doc/build/orm/loading_objects.rst +++ b/doc/build/orm/loading_objects.rst @@ -24,5 +24,4 @@ sections are currently mixed as far as which style they are using. loading_columns loading_relationships inheritance_loading - constructors query diff --git a/doc/build/orm/mapping_api.rst b/doc/build/orm/mapping_api.rst index 5d0b6c0d023..199d92d4315 100644 --- a/doc/build/orm/mapping_api.rst +++ b/doc/build/orm/mapping_api.rst @@ -34,6 +34,8 @@ Class Mapping API .. autofunction:: polymorphic_union +.. autofunction:: reconstructor + .. autoclass:: Mapper :members: diff --git a/doc/build/orm/mapping_styles.rst b/doc/build/orm/mapping_styles.rst index 1b33aa2e29f..84db8cb0870 100644 --- a/doc/build/orm/mapping_styles.rst +++ b/doc/build/orm/mapping_styles.rst @@ -290,6 +290,8 @@ method which passes them along to the :func:`_orm.mapper` function. The full range of parameters accepted are documented at :class:`_orm.mapper`. +.. 
_orm_mapped_class_behavior: + Mapped Class Behavior ===================== diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index ed221a964a0..e13ce4bff9c 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -3544,6 +3544,12 @@ def reconstructor(fn): method that will be called by the ORM after the instance has been loaded from the database or otherwise reconstituted. + .. tip:: + + The :func:`_orm.reconstructor` decorator makes use of the + :meth:`_orm.InstanceEvents.load` event hook, which can be + used directly. + The reconstructor will be invoked with no arguments. Scalar (non-collection) database-mapped attributes of the instance will be available for use within the function. Eagerly-loaded @@ -3554,8 +3560,6 @@ def reconstructor(fn): .. seealso:: - :ref:`mapping_constructors` - :meth:`.InstanceEvents.load` """ From ffd2920ddc7ecb3b72c67c0d894770bec901d8ff Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 22 Aug 2022 13:43:59 -0400 Subject: [PATCH 348/632] reformat functions.rst; document coalsce this file was all over the place autodocumenting all the contents of functions.py with no regards to the heading paragraph which seemed to be introducing the generic functions. Use specific autoclass/autofunc docs as automodule is generally unworkable. Add missing docstring for coalesce function. Fixes: #8415 Change-Id: I4c37e6153282ce99b9f5d674f6e802c25ef536e1 (cherry picked from commit 07961a8bd0b965d79855275b0eb80075b57c2d49) --- doc/build/core/functions.rst | 138 ++++++++++++++++++++++++++++++++--- 1 file changed, 127 insertions(+), 11 deletions(-) diff --git a/doc/build/core/functions.rst b/doc/build/core/functions.rst index 8a3c5221fd1..00e384679cc 100644 --- a/doc/build/core/functions.rst +++ b/doc/build/core/functions.rst @@ -7,24 +7,140 @@ SQL and Generic Functions .. 
currentmodule:: sqlalchemy.sql.functions -SQL functions which are known to SQLAlchemy with regards to database-specific -rendering, return types and argument behavior. Generic functions are invoked -like all SQL functions, using the :attr:`func` attribute:: +SQL functions are invoked by using the :data:`_sql.func` namespace. +See the tutorial at :ref:`tutorial_functions` for background on how to +use the :data:`_sql.func` object to render SQL functions in statements. - select(func.count()).select_from(sometable) +.. seealso:: + + :ref:`tutorial_functions` - in the :ref:`unified_tutorial` + +Function API +------------ + +The base API for SQL functions, which provides for the :data:`_sql.func` +namespace as well as classes that may be used for extensibility. + +.. autoclass:: AnsiFunction + :exclude-members: inherit_cache, __new__ + +.. autoclass:: Function + :members: + :inherited-members: ColumnElement + :exclude-members: inherit_cache, __new__ + +.. autoclass:: FunctionElement + :members: + :exclude-members: inherit_cache, __new__ + +.. autoclass:: GenericFunction + :exclude-members: inherit_cache, __new__ -Note that any name not known to :attr:`func` generates the function name as is - there is no restriction on what SQL functions can be called, known or +.. autofunction:: register_function + + +Selected "Known" Functions +-------------------------- + +These are :class:`.GenericFunction` implementations for a selected set of +common SQL functions that set up the expected return type for each function +automatically. They are invoked in the same way as any other member of the +:data:`_sql.func` namespace:: + + select(func.count('*')).select_from(some_table) + +Note that any name not known to :data:`_sql.func` generates the function name +as is - there is no restriction on what SQL functions can be called, known or unknown to SQLAlchemy, built-in or user defined.
The section here only describes those functions where SQLAlchemy already knows what argument and return types are in use. -.. seealso:: +.. autoclass:: array_agg + :no-members: - :ref:`tutorial_functions` - in the :ref:`unified_tutorial` +.. autoclass:: char_length + :no-members: -.. automodule:: sqlalchemy.sql.functions - :members: - :exclude-members: func +.. autoclass:: coalesce + :no-members: + +.. autoclass:: concat + :no-members: + +.. autoclass:: count + :no-members: + +.. autoclass:: cube + :no-members: + +.. autoclass:: cume_dist + :no-members: + +.. autoclass:: current_date + :no-members: + +.. autoclass:: current_time + :no-members: + +.. autoclass:: current_timestamp + :no-members: + +.. autoclass:: current_user + :no-members: + +.. autoclass:: dense_rank + :no-members: + +.. autoclass:: grouping_sets + :no-members: + +.. autoclass:: localtime + :no-members: + +.. autoclass:: localtimestamp + :no-members: + +.. autoclass:: max + :no-members: + +.. autoclass:: min + :no-members: + +.. autoclass:: mode + :no-members: + +.. autoclass:: next_value + :no-members: + +.. autoclass:: now + :no-members: + +.. autoclass:: percent_rank + :no-members: + +.. autoclass:: percentile_cont + :no-members: + +.. autoclass:: percentile_disc + :no-members: + +.. autoclass:: random + :no-members: + +.. autoclass:: rank + :no-members: + +.. autoclass:: rollup + :no-members: + +.. autoclass:: session_user + :no-members: + +.. autoclass:: sum + :no-members: +.. autoclass:: sysdate + :no-members: +.. autoclass:: user + :no-members: From 3ffca71b9032a3168c9c946f88eb144b2ec87e64 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 22 Aug 2022 13:55:08 -0400 Subject: [PATCH 349/632] more function adjustments the inherited-members feature works very poorly and inconsistently in sphinx. 
just don't use it here as it refuses to exclude ColumnOperators methods Change-Id: Ic50865c9901e7225a99ff7f33454da15ff91b12f (cherry picked from commit 25fa4c0250730958c39676007cc39df7b0ee2124) --- doc/build/core/functions.rst | 3 --- lib/sqlalchemy/sql/functions.py | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/doc/build/core/functions.rst b/doc/build/core/functions.rst index 00e384679cc..efa7c78d33f 100644 --- a/doc/build/core/functions.rst +++ b/doc/build/core/functions.rst @@ -25,9 +25,6 @@ namespace as well as classes that may be used for extensibility. :exclude-members: inherit_cache, __new__ .. autoclass:: Function - :members: - :inherited-members: ColumnElement - :exclude-members: inherit_cache, __new__ .. autoclass:: FunctionElement - :members: diff --git a/lib/sqlalchemy/sql/functions.py b/lib/sqlalchemy/sql/functions.py index 29f41223d35..2b264e5bf96 100644 --- a/lib/sqlalchemy/sql/functions.py +++ b/lib/sqlalchemy/sql/functions.py @@ -528,7 +528,7 @@ def alias(self, name=None, joins_implicitly=False): in the columns or where clause, for a backend such as PostgreSQL. For a full table-valued expression, use the - :meth:`_function.FunctionElement.table_valued` method first to + :meth:`_functions.FunctionElement.table_valued` method first to establish named columns. e.g.:: From 1c43ad66478905c8d6cf40d9c818fc1ceeb4efbb Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 23 Aug 2022 09:28:06 -0400 Subject: [PATCH 350/632] integrate connection.terminate() for supporting dialects Integrated support for asyncpg's ``terminate()`` method call for cases where the connection pool is recycling a possibly timed-out connection, where a connection is being garbage collected that wasn't gracefully closed, as well as when the connection has been invalidated. This allows asyncpg to abandon the connection without waiting for a response that may incur long timeouts.
Fixes: #8419 Change-Id: Ia575af779d5733b483a72dff3690b8bbbad2bb05 (cherry picked from commit 3b7e621aa728d9b01dbac4150e13ea2ef6af35a3) --- doc/build/changelog/unreleased_14/8419.rst | 10 +++++ lib/sqlalchemy/dialects/postgresql/asyncpg.py | 7 ++++ lib/sqlalchemy/engine/default.py | 4 ++ lib/sqlalchemy/engine/interfaces.py | 17 +++++++++ lib/sqlalchemy/pool/base.py | 38 +++++++++++++------ test/engine/test_logging.py | 2 +- test/engine/test_pool.py | 21 +++++++--- 7 files changed, 82 insertions(+), 17 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8419.rst diff --git a/doc/build/changelog/unreleased_14/8419.rst b/doc/build/changelog/unreleased_14/8419.rst new file mode 100644 index 00000000000..a095d858d2e --- /dev/null +++ b/doc/build/changelog/unreleased_14/8419.rst @@ -0,0 +1,10 @@ +.. change:: + :tags: bug, asyncio + :tickets: 8419 + + Integrated support for asyncpg's ``terminate()`` method call for cases + where the connection pool is recycling a possibly timed-out connection, + where a connection is being garbage collected that wasn't gracefully + closed, as well as when the connection has been invalidated. This allows + asyncpg to abandon the connection without waiting for a response that may + incur long timeouts. 
diff --git a/lib/sqlalchemy/dialects/postgresql/asyncpg.py b/lib/sqlalchemy/dialects/postgresql/asyncpg.py index 305ad46a32b..39b0f544cb4 100644 --- a/lib/sqlalchemy/dialects/postgresql/asyncpg.py +++ b/lib/sqlalchemy/dialects/postgresql/asyncpg.py @@ -748,6 +748,9 @@ def close(self): self.await_(self._connection.close()) + def terminate(self): + self._connection.terminate() + class AsyncAdaptFallback_asyncpg_connection(AsyncAdapt_asyncpg_connection): __slots__ = () @@ -891,6 +894,7 @@ class PGDialect_asyncpg(PGDialect): supports_server_side_cursors = True supports_unicode_binds = True + has_terminate = True default_paramstyle = "format" supports_sane_multi_rowcount = False @@ -987,6 +991,9 @@ def set_deferrable(self, connection, value): def get_deferrable(self, connection): return connection.deferrable + def do_terminate(self, dbapi_connection) -> None: + dbapi_connection.terminate() + def create_connect_args(self, url): opts = url.translate_connect_args(username="user") diff --git a/lib/sqlalchemy/engine/default.py b/lib/sqlalchemy/engine/default.py index 268a2d60930..6b58c44696b 100644 --- a/lib/sqlalchemy/engine/default.py +++ b/lib/sqlalchemy/engine/default.py @@ -231,6 +231,7 @@ class DefaultDialect(interfaces.Dialect): CACHING_DISABLED = CACHING_DISABLED NO_CACHE_KEY = NO_CACHE_KEY NO_DIALECT_SUPPORT = NO_DIALECT_SUPPORT + has_terminate = False @util.deprecated_params( convert_unicode=( @@ -684,6 +685,9 @@ def do_rollback(self, dbapi_connection): def do_commit(self, dbapi_connection): dbapi_connection.commit() + def do_terminate(self, dbapi_connection): + self.do_close(dbapi_connection) + def do_close(self, dbapi_connection): dbapi_connection.close() diff --git a/lib/sqlalchemy/engine/interfaces.py b/lib/sqlalchemy/engine/interfaces.py index 4f2524aef2d..4e0ab8e72fd 100644 --- a/lib/sqlalchemy/engine/interfaces.py +++ b/lib/sqlalchemy/engine/interfaces.py @@ -583,6 +583,23 @@ def do_commit(self, dbapi_connection): raise NotImplementedError() + def 
do_terminate(self, dbapi_connection): + """Provide an implementation of ``connection.close()`` that tries as + much as possible to not block, given a DBAPI + connection. + + In the vast majority of cases this just calls .close(), however + for some asyncio dialects may call upon different API features. + + This hook is called by the :class:`_pool.Pool` + when a connection is being recycled or has been invalidated. + + .. versionadded:: 1.4.41 + + """ + + raise NotImplementedError() + def do_close(self, dbapi_connection): """Provide an implementation of ``connection.close()``, given a DBAPI connection. diff --git a/lib/sqlalchemy/pool/base.py b/lib/sqlalchemy/pool/base.py index cde28c2fb02..9f16c654334 100644 --- a/lib/sqlalchemy/pool/base.py +++ b/lib/sqlalchemy/pool/base.py @@ -36,6 +36,7 @@ class _ConnDialect(object): """ is_async = False + has_terminate = False def do_rollback(self, dbapi_connection): dbapi_connection.rollback() @@ -43,6 +44,9 @@ def do_rollback(self, dbapi_connection): def do_commit(self, dbapi_connection): dbapi_connection.commit() + def do_terminate(self, dbapi_connection): + dbapi_connection.close() + def do_close(self, dbapi_connection): dbapi_connection.close() @@ -240,11 +244,17 @@ def _should_wrap_creator(self, creator): else: return lambda crec: creator() - def _close_connection(self, connection): - self.logger.debug("Closing connection %r", connection) - + def _close_connection(self, connection, terminate=False): + self.logger.debug( + "%s connection %r", + "Hard-closing" if terminate else "Closing", + connection, + ) try: - self._dialect.do_close(connection) + if terminate: + self._dialect.do_terminate(connection) + else: + self._dialect.do_close(connection) except Exception: self.logger.error( "Exception closing connection %r", connection, exc_info=True @@ -584,7 +594,7 @@ def invalidate(self, e=None, soft=False): if soft: self._soft_invalidate_time = time.time() else: - self.__close() + self.__close(terminate=True) 
self.dbapi_connection = None def get_connection(self): @@ -630,7 +640,7 @@ def get_connection(self): recycle = True if recycle: - self.__close() + self.__close(terminate=True) self.info.clear() self.__connect() @@ -643,11 +653,13 @@ def _is_hard_or_soft_invalidated(self): or (self._soft_invalidate_time > self.starttime) ) - def __close(self): + def __close(self, terminate=False): self.finalize_callback.clear() if self.__pool.dispatch.close: self.__pool.dispatch.close(self.dbapi_connection, self) - self.__pool._close_connection(self.dbapi_connection) + self.__pool._close_connection( + self.dbapi_connection, terminate=terminate + ) self.dbapi_connection = None def __connect(self): @@ -709,7 +721,9 @@ def _finalize_fairy( dbapi_connection = connection_record.dbapi_connection # null pool is not _is_asyncio but can be used also with async dialects - dont_restore_gced = pool._dialect.is_async + dont_restore_gced = ( + pool._dialect.is_async and not pool._dialect.has_terminate + ) if dont_restore_gced: detach = not connection_record or ref @@ -751,8 +765,10 @@ def _finalize_fairy( else: message = ( "The garbage collector is trying to clean up " - "connection %r. This feature is unsupported on async " - "dbapi, since no IO can be performed at this stage to " + "connection %r. This feature is unsupported on " + "unsupported on asyncio " + 'dbapis that lack a "terminate" feature, ' + "since no IO can be performed at this stage to " "reset the connection. 
Please close out all " "connections when they are no longer used, calling " "``close()`` or using a context manager to " diff --git a/test/engine/test_logging.py b/test/engine/test_logging.py index 7a0ed6e7934..5b0d6c762e2 100644 --- a/test/engine/test_logging.py +++ b/test/engine/test_logging.py @@ -468,7 +468,7 @@ def _test_queuepool(self, q, dispose=True): "Connection %r checked out from pool", "Connection %r being returned to pool%s", "Connection %s rollback-on-return", - "Closing connection %r", + "%s connection %r", ] + (["Pool disposed. %s"] if dispose else []), ) diff --git a/test/engine/test_pool.py b/test/engine/test_pool.py index 320a9bb5854..879369a9ffd 100644 --- a/test/engine/test_pool.py +++ b/test/engine/test_pool.py @@ -92,10 +92,13 @@ def _queuepool_fixture(self, **kw): def _queuepool_dbapi_fixture(self, **kw): dbapi = MockDBAPI() _is_asyncio = kw.pop("_is_asyncio", False) + _has_terminate = kw.pop("_has_terminate", False) p = pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw) if _is_asyncio: p._is_asyncio = True p._dialect = _AsyncConnDialect() + if _has_terminate: + p._dialect.has_terminate = True return dbapi, p @@ -468,8 +471,10 @@ def checkout(*arg, **kw): return p, canary - def _checkin_event_fixture(self, _is_asyncio=False): - p = self._queuepool_fixture(_is_asyncio=_is_asyncio) + def _checkin_event_fixture(self, _is_asyncio=False, _has_terminate=False): + p = self._queuepool_fixture( + _is_asyncio=_is_asyncio, _has_terminate=_has_terminate + ) canary = [] @event.listens_for(p, "checkin") @@ -744,9 +749,13 @@ def test_invalidate_event_exception(self): assert canary.call_args_list[0][0][0] is dbapi_con assert canary.call_args_list[0][0][2] is exc - @testing.combinations((True, testing.requires.python3), (False,)) - def test_checkin_event_gc(self, detach_gced): - p, canary = self._checkin_event_fixture(_is_asyncio=detach_gced) + @testing.combinations((True,), (False,), argnames="is_asyncio") + @testing.combinations((True,), 
(False,), argnames="has_terminate") + @testing.requires.python3 + def test_checkin_event_gc(self, is_asyncio, has_terminate): + p, canary = self._checkin_event_fixture( + _is_asyncio=is_asyncio, _has_terminate=has_terminate + ) c1 = p.connect() @@ -756,6 +765,8 @@ def test_checkin_event_gc(self, detach_gced): del c1 lazy_gc() + detach_gced = is_asyncio and not has_terminate + if detach_gced: # "close_detached" is not called because for asyncio the # connection is just lost. From ef65749704796130768c31dec37064d70e6695e7 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 25 Aug 2022 14:05:16 -0400 Subject: [PATCH 351/632] try out greenlet / cython on py311 I've updated jenkins to see what happens Change-Id: If71b3f6da98dacd21419e8ece2395bc5fd20d133 (cherry picked from commit e14a8e3295d289e256bf236d0461034f35b7fcc3) --- test/ext/asyncio/test_engine_py3k.py | 4 ++-- tox.ini | 4 ---- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/test/ext/asyncio/test_engine_py3k.py b/test/ext/asyncio/test_engine_py3k.py index eddf4e52fc3..673eff2fb19 100644 --- a/test/ext/asyncio/test_engine_py3k.py +++ b/test/ext/asyncio/test_engine_py3k.py @@ -1,4 +1,3 @@ -import asyncio import inspect as stdlib_inspect from sqlalchemy import Column @@ -488,7 +487,8 @@ async def test_dispose(self, async_engine): @async_test async def test_init_once_concurrency(self, async_engine): async with async_engine.connect() as c1, async_engine.connect() as c2: - await asyncio.wait([c1, c2]) + eq_(await c1.scalar(select(1)), 1) + eq_(await c2.scalar(select(1)), 1) @async_test async def test_connect_ctxmanager(self, async_engine): diff --git a/tox.ini b/tox.ini index 43eb04d8174..2000351716b 100644 --- a/tox.ini +++ b/tox.ini @@ -21,10 +21,6 @@ deps= pytest-xdist mock; python_version < '3.3' - # cython and greenlet both not working on 3.11 - # note cython not working for 3.11 at all right now - git+https://github.com/sqlalchemyorg/greenlet/@fix_py311_cpp#egg=greenlet; python_version >= '3.11' - 
sqlite: .[aiosqlite] sqlite_file: .[aiosqlite] sqlite_file: .[sqlcipher]; python_version >= '3' and python_version < '3.10' From 63c992b5668b3f2dd9e178cf0ad43c69174e9da0 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 25 Aug 2022 14:05:16 -0400 Subject: [PATCH 352/632] run github pipeline on python 3.11 Change-Id: I555c1e16c5347e67da4c70414b4677b2d3afebd5 --- .github/workflows/create-wheels.yaml | 3 +++ .github/workflows/run-test.yaml | 6 ++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/workflows/create-wheels.yaml b/.github/workflows/create-wheels.yaml index 6dbfcfa8dc7..06999b60608 100644 --- a/.github/workflows/create-wheels.yaml +++ b/.github/workflows/create-wheels.yaml @@ -29,6 +29,7 @@ jobs: - "3.8" - "3.9" - "3.10" + - "3.11.0-rc - 3.11" architecture: - x64 - x86 @@ -137,6 +138,7 @@ jobs: - cp38-cp38 - cp39-cp39 - cp310-cp310 + - cp311-cp311 architecture: - x64 @@ -291,6 +293,7 @@ jobs: - cp38-cp38 - cp39-cp39 - cp310-cp310 + - cp311-cp311 fail-fast: false diff --git a/.github/workflows/run-test.yaml b/.github/workflows/run-test.yaml index 36dfce250d2..fd77e3987ff 100644 --- a/.github/workflows/run-test.yaml +++ b/.github/workflows/run-test.yaml @@ -37,6 +37,7 @@ jobs: - "3.8" - "3.9" - "3.10" + - "3.11.0-rc - 3.11" # waiting on https://foss.heptapod.net/pypy/pypy/-/issues/3690 # which also seems to be in 3.9 # - "pypy-3.9" @@ -105,6 +106,7 @@ jobs: - cp38-cp38 - cp39-cp39 - cp310-cp310 + - cp311-cp311 build-type: - "cext" - "nocext" @@ -146,7 +148,7 @@ jobs: - "3.8" - "3.9" - "3.10" - + - "3.11.0-rc - 3.11" fail-fast: false # steps to run in each job. 
Some are github actions, others run shell commands @@ -178,7 +180,7 @@ jobs: os: - "ubuntu-latest" python-version: - - "3.9" + - "3.10" fail-fast: false From c1d6e807730b9a0ed2d9a0436aeae43cc0cf47f4 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 29 Aug 2022 10:43:36 -0400 Subject: [PATCH 353/632] refine ruleset to determine when poly adaption should be used Fixed regression appearing in the 1.4 series where a joined-inheritance query placed as a subquery within an enclosing query for that same entity would fail to render the JOIN correctly for the inner query. The issue manifested in two different ways prior and subsequent to version 1.4.18 (related issue #6595), in one case rendering JOIN twice, in the other losing the JOIN entirely. To resolve, the conditions under which "polymorphic loading" are applied have been scaled back to not be invoked for simple joined inheritance queries. Fixes: #8456 Change-Id: Ie4332fadb1dfc670cd31d098a6586a9f6976bcf7 (cherry picked from commit 137b50e1ecf1ddea1ff63d94f24f6445e6807dc9) --- doc/build/changelog/unreleased_14/8456.rst | 12 +++++++ lib/sqlalchemy/orm/context.py | 19 ++++------- lib/sqlalchemy/orm/mapper.py | 35 +++++++++++++++++++ lib/sqlalchemy/orm/strategies.py | 1 + test/orm/inheritance/test_concrete.py | 6 ++++ test/orm/inheritance/test_polymorphic_rel.py | 36 ++++++++++++++++++++ 6 files changed, 96 insertions(+), 13 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8456.rst diff --git a/doc/build/changelog/unreleased_14/8456.rst b/doc/build/changelog/unreleased_14/8456.rst new file mode 100644 index 00000000000..ca769fd3424 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8456.rst @@ -0,0 +1,12 @@ +.. change:: + :tags: bug, orm, regression + :tickets: 8456 + + Fixed regression appearing in the 1.4 series where a joined-inheritance + query placed as a subquery within an enclosing query for that same entity + would fail to render the JOIN correctly for the inner query. 
The issue + manifested in two different ways prior and subsequent to version 1.4.18 + (related issue #6595), in one case rendering JOIN twice, in the other + losing the JOIN entirely. To resolve, the conditions under which + "polymorphic loading" are applied have been scaled back to not be invoked + for simple joined inheritance queries. diff --git a/lib/sqlalchemy/orm/context.py b/lib/sqlalchemy/orm/context.py index 592a2c1e4df..d5a742cc578 100644 --- a/lib/sqlalchemy/orm/context.py +++ b/lib/sqlalchemy/orm/context.py @@ -361,6 +361,10 @@ def _lead_mapper_entities(self): ] def _create_with_polymorphic_adapter(self, ext_info, selectable): + """given MapperEntity or ORMColumnEntity, setup polymorphic loading + if appropriate + + """ if ( not ext_info.is_aliased_class and ext_info.mapper.persist_selectable @@ -2559,14 +2563,7 @@ def __init__( self._with_polymorphic_mappers = ext_info.with_polymorphic_mappers self._polymorphic_discriminator = ext_info.polymorphic_on - if ( - mapper.with_polymorphic - # controversy - only if inheriting mapper is also - # polymorphic? 
- # or (mapper.inherits and mapper.inherits.with_polymorphic) - or mapper.inherits - or mapper._requires_row_aliasing - ): + if mapper._should_select_with_poly_adapter: compile_state._create_with_polymorphic_adapter( ext_info, self.selectable ) @@ -3060,11 +3057,7 @@ def __init__( self._extra_entities = (self.expr, self.column) - if ( - mapper.with_polymorphic - or mapper.inherits - or mapper._requires_row_aliasing - ): + if mapper._should_select_with_poly_adapter: compile_state._create_with_polymorphic_adapter( ezero, ezero.selectable ) diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index e13ce4bff9c..2554dde0de8 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -2125,6 +2125,41 @@ def _single_table_criterion(self): else: return None + @HasMemoized.memoized_attribute + def _should_select_with_poly_adapter(self): + """determine if _MapperEntity or _ORMColumnEntity will need to use + polymorphic adaption when setting up a SELECT as well as fetching + rows for mapped classes and subclasses against this Mapper. + + moved here from context.py for #8456 to generalize the ruleset + for this condition. + + """ + + # this has been simplified as of #8456. + # rule is: if we have a with_polymorphic or a concrete-style + # polymorphic selectable, *or* if the base mapper has either of those, + # we turn on the adaption thing. if not, we do *no* adaption. + # + # this splits the behavior among the "regular" joined inheritance + # and single inheritance mappers, vs. the "weird / difficult" + # concrete and joined inh mappings that use a with_polymorphic of + # some kind or polymorphic_union. + # + # note we have some tests in test_polymorphic_rel that query against + # a subclass, then refer to the superclass that has a with_polymorphic + # on it (such as test_join_from_polymorphic_explicit_aliased_three). 
+ # these tests actually adapt the polymorphic selectable (like, the + # UNION or the SELECT subquery with JOIN in it) to be just the simple + # subclass table. Hence even if we are a "plain" inheriting mapper + # but our base has a wpoly on it, we turn on adaption. + return ( + self.with_polymorphic + or self._requires_row_aliasing + or self.base_mapper.with_polymorphic + or self.base_mapper._requires_row_aliasing + ) + @HasMemoized.memoized_attribute def _with_polymorphic_mappers(self): self._check_configure() diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index 288e6e06bfc..944c114a640 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -249,6 +249,7 @@ def create_row_processor( ): # look through list of columns represented here # to see which, if any, is present in the row. + for col in self.columns: if adapter: col = adapter.columns[col] diff --git a/test/orm/inheritance/test_concrete.py b/test/orm/inheritance/test_concrete.py index 56beffedd57..a7ff156c544 100644 --- a/test/orm/inheritance/test_concrete.py +++ b/test/orm/inheritance/test_concrete.py @@ -195,6 +195,7 @@ def test_basic(self): "Manager Sally knows how to manage things", ] ) + assert set([repr(x) for x in session.query(Manager)]) == set( ["Manager Sally knows how to manage things"] ) @@ -1673,6 +1674,11 @@ def test_contains_eager(self): "metadata.some_data AS some_data FROM b " "JOIN metadata ON metadata.id = b.metadata_id " "WHERE metadata.id < :id_3) AS anon_1 ORDER BY anon_1.id", + # tip: whether or not there is "id_2" and "id_3" here, + # or just "id_2", is based on whether or not the two + # queries had polymorphic adaption proceed, so that the + # two filter criterias are different vs. the same object. see + # mapper._should_select_with_poly_adapter added in #8456. 
[{"param_1": "a", "id_2": 3, "param_2": "b", "id_3": 3}], ) ) diff --git a/test/orm/inheritance/test_polymorphic_rel.py b/test/orm/inheritance/test_polymorphic_rel.py index 9ccec61ee12..d30d7a28c48 100644 --- a/test/orm/inheritance/test_polymorphic_rel.py +++ b/test/orm/inheritance/test_polymorphic_rel.py @@ -503,6 +503,7 @@ def test_join_from_polymorphic_explicit_aliased_two(self): def test_join_from_polymorphic_explicit_aliased_three(self): sess = fixture_session() pa = aliased(Paperwork) + eq_( sess.query(Engineer) .order_by(Person.person_id) @@ -2101,6 +2102,41 @@ def test_correlation_three(self): class PolymorphicTest(_PolymorphicTestBase, _Polymorphic): + def test_joined_aliasing_unrelated_subuqery(self): + """test #8456""" + + inner = select(Engineer).where(Engineer.name == "vlad").subquery() + + crit = select(inner.c.person_id) + + outer = select(Engineer).where(Engineer.person_id.in_(crit)) + + # this query will not work at all for any "polymorphic" case + # as it will adapt the inner query as well. for those cases, + # aliased() has to be used for the inner entity to disambiguate it. 
+ self.assert_compile( + outer, + "SELECT engineers.person_id, people.person_id AS person_id_1, " + "people.company_id, people.name, people.type, engineers.status, " + "engineers.engineer_name, engineers.primary_language " + "FROM people JOIN engineers " + "ON people.person_id = engineers.person_id " + "WHERE engineers.person_id IN " + "(SELECT anon_1.person_id FROM " + "(SELECT engineers.person_id AS person_id, " + "people.person_id AS person_id_1, " + "people.company_id AS company_id, people.name AS name, " + "people.type AS type, engineers.status AS status, " + "engineers.engineer_name AS engineer_name, " + "engineers.primary_language AS primary_language FROM people " + "JOIN engineers ON people.person_id = engineers.person_id " + "WHERE people.name = :name_1) " + "AS anon_1)", + ) + + sess = fixture_session() + eq_(sess.scalars(outer).all(), [Engineer(name="vlad")]) + def test_primary_eager_aliasing_three_dont_reset_selectable(self): """test now related to #7262 From 5972a06ea18434d620685eb024be11acef1ce73d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 30 Aug 2022 09:50:03 -0400 Subject: [PATCH 354/632] apply consistent ORM mutable notes for all mutable SQL types in https://github.com/sqlalchemy/sqlalchemy/discussions/8447 I was surprised that we didnt have any notes about using Mutable for ARRAY classes, since we have them for HSTORE and JSON. Add a consistent topic box for these so we have something to point towards. 
Change-Id: Idfa1b2cbee67024545f4fa299e4c875075ec7d3f (cherry picked from commit 2f146b172ad228e40f1e8d5f1d2abc888ae5e669) --- lib/sqlalchemy/dialects/postgresql/array.py | 25 +++++++++++ lib/sqlalchemy/dialects/postgresql/hstore.py | 46 +++++++++++--------- lib/sqlalchemy/sql/sqltypes.py | 31 ++++++++++++- 3 files changed, 80 insertions(+), 22 deletions(-) diff --git a/lib/sqlalchemy/dialects/postgresql/array.py b/lib/sqlalchemy/dialects/postgresql/array.py index daf7c5d40d0..2f915c975ea 100644 --- a/lib/sqlalchemy/dialects/postgresql/array.py +++ b/lib/sqlalchemy/dialects/postgresql/array.py @@ -190,6 +190,31 @@ class also conjunction with the :class:`.ENUM` type. For a workaround, see the special type at :ref:`postgresql_array_of_enum`. + .. container:: topic + + **Detecting Changes in ARRAY columns when using the ORM** + + The :class:`_postgresql.ARRAY` type, when used with the SQLAlchemy ORM, + does not detect in-place mutations to the array. In order to detect + these, the :mod:`sqlalchemy.ext.mutable` extension must be used, using + the :class:`.MutableList` class:: + + from sqlalchemy.dialects.postgresql import ARRAY + from sqlalchemy.ext.mutable import MutableList + + class SomeOrmClass(Base): + # ... + + data = Column(MutableList.as_mutable(ARRAY(Integer))) + + This extension will allow "in-place" changes such to the array + such as ``.append()`` to produce events which will be detected by the + unit of work. Note that changes to elements **inside** the array, + including subarrays that are mutated in place, are **not** detected. + + Alternatively, assigning a new array value to an ORM element that + replaces the old one will always trigger a change event. + .. 
seealso:: :class:`_types.ARRAY` - base array type diff --git a/lib/sqlalchemy/dialects/postgresql/hstore.py b/lib/sqlalchemy/dialects/postgresql/hstore.py index 29800d2e39b..3859395a86e 100644 --- a/lib/sqlalchemy/dialects/postgresql/hstore.py +++ b/lib/sqlalchemy/dialects/postgresql/hstore.py @@ -96,34 +96,38 @@ class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine): For a full list of special methods see :class:`.HSTORE.comparator_factory`. - For usage with the SQLAlchemy ORM, it may be desirable to combine - the usage of :class:`.HSTORE` with :class:`.MutableDict` dictionary - now part of the :mod:`sqlalchemy.ext.mutable` - extension. This extension will allow "in-place" changes to the - dictionary, e.g. addition of new keys or replacement/removal of existing - keys to/from the current dictionary, to produce events which will be - detected by the unit of work:: + .. container:: topic - from sqlalchemy.ext.mutable import MutableDict + **Detecting Changes in HSTORE columns when using the ORM** - class MyClass(Base): - __tablename__ = 'data_table' + For usage with the SQLAlchemy ORM, it may be desirable to combine the + usage of :class:`.HSTORE` with :class:`.MutableDict` dictionary now + part of the :mod:`sqlalchemy.ext.mutable` extension. This extension + will allow "in-place" changes to the dictionary, e.g. 
addition of new + keys or replacement/removal of existing keys to/from the current + dictionary, to produce events which will be detected by the unit of + work:: - id = Column(Integer, primary_key=True) - data = Column(MutableDict.as_mutable(HSTORE)) + from sqlalchemy.ext.mutable import MutableDict - my_object = session.query(MyClass).one() + class MyClass(Base): + __tablename__ = 'data_table' - # in-place mutation, requires Mutable extension - # in order for the ORM to detect - my_object.data['some_key'] = 'some value' + id = Column(Integer, primary_key=True) + data = Column(MutableDict.as_mutable(HSTORE)) - session.commit() + my_object = session.query(MyClass).one() - When the :mod:`sqlalchemy.ext.mutable` extension is not used, the ORM - will not be alerted to any changes to the contents of an existing - dictionary, unless that dictionary value is re-assigned to the - HSTORE-attribute itself, thus generating a change event. + # in-place mutation, requires Mutable extension + # in order for the ORM to detect + my_object.data['some_key'] = 'some value' + + session.commit() + + When the :mod:`sqlalchemy.ext.mutable` extension is not used, the ORM + will not be alerted to any changes to the contents of an existing + dictionary, unless that dictionary value is re-assigned to the + HSTORE-attribute itself, thus generating a change event. .. seealso:: diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index 322bfec27e4..4a988755cd2 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ -2253,11 +2253,15 @@ class JSON(Indexable, TypeEngine): The :class:`_types.JSON` type, when used with the SQLAlchemy ORM, does not detect in-place mutations to the structure. In order to detect these, the - :mod:`sqlalchemy.ext.mutable` extension must be used. This extension will + :mod:`sqlalchemy.ext.mutable` extension must be used, most typically + using the :class:`.MutableDict` class. 
This extension will allow "in-place" changes to the datastructure to produce events which will be detected by the unit of work. See the example at :class:`.HSTORE` for a simple example involving a dictionary. + Alternatively, assigning a JSON structure to an ORM element that + replaces the old one will always trigger a change event. + **Support for JSON null vs. SQL NULL** When working with NULL values, the :class:`_types.JSON` type recommends the @@ -2764,6 +2768,31 @@ class ARRAY(SchemaEventTarget, Indexable, Concatenable, TypeEngine): :meth:`.types.ARRAY.Comparator.all`. The PostgreSQL-specific version of :class:`_types.ARRAY` also provides additional operators. + .. container:: topic + + **Detecting Changes in ARRAY columns when using the ORM** + + The :class:`_sqltypes.ARRAY` type, when used with the SQLAlchemy ORM, + does not detect in-place mutations to the array. In order to detect + these, the :mod:`sqlalchemy.ext.mutable` extension must be used, using + the :class:`.MutableList` class:: + + from sqlalchemy import ARRAY + from sqlalchemy.ext.mutable import MutableList + + class SomeOrmClass(Base): + # ... + + data = Column(MutableList.as_mutable(ARRAY(Integer))) + + This extension will allow "in-place" changes such to the array + such as ``.append()`` to produce events which will be detected by the + unit of work. Note that changes to elements **inside** the array, + including subarrays that are mutated in place, are **not** detected. + + Alternatively, assigning a new array value to an ORM element that + replaces the old one will always trigger a change event. + .. versionadded:: 1.1.0 .. 
seealso:: From dce7050c1b3f1e8735070026e2d73abe70cb1f21 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 30 Aug 2022 10:25:47 -0400 Subject: [PATCH 355/632] implement event for merge/load=False for mutable state setup Fixed issue in :mod:`sqlalchemy.ext.mutable` extension where collection links to the parent object would be lost if the object were merged with :meth:`.Session.merge` while also passing :paramref:`.Session.merge.load` as False. The event added here is currently private for expediency, but is acceptable to become a public event at some point. Fixes: #8446 Change-Id: I9e5b9f1f5a0c5a9781f51635d5e57b1134c9e866 (cherry picked from commit e15cf451affdef95b3248d1ea5c31ac923e661c3) --- doc/build/changelog/unreleased_14/8446.rst | 8 ++++ lib/sqlalchemy/ext/mutable.py | 8 ++++ lib/sqlalchemy/orm/events.py | 17 ++++++++ lib/sqlalchemy/orm/session.py | 3 ++ test/ext/test_mutable.py | 45 ++++++++++++++++++++++ 5 files changed, 81 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/8446.rst diff --git a/doc/build/changelog/unreleased_14/8446.rst b/doc/build/changelog/unreleased_14/8446.rst new file mode 100644 index 00000000000..9f4cdfddd6c --- /dev/null +++ b/doc/build/changelog/unreleased_14/8446.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, ext + :tickets: 8446 + + Fixed issue in :mod:`sqlalchemy.ext.mutable` extension where collection + links to the parent object would be lost if the object were merged with + :meth:`.Session.merge` while also passing :paramref:`.Session.merge.load` + as False. 
diff --git a/lib/sqlalchemy/ext/mutable.py b/lib/sqlalchemy/ext/mutable.py index cbec06a31fe..45c96178a65 100644 --- a/lib/sqlalchemy/ext/mutable.py +++ b/lib/sqlalchemy/ext/mutable.py @@ -511,6 +511,14 @@ def unpickle(state, state_dict): for val in state_dict["ext.mutable.values"][key]: val._parents[state] = key + event.listen( + parent_cls, + "_sa_event_merge_wo_load", + load, + raw=True, + propagate=True, + ) + event.listen(parent_cls, "load", load, raw=True, propagate=True) event.listen( parent_cls, "refresh", load_attrs, raw=True, propagate=True diff --git a/lib/sqlalchemy/orm/events.py b/lib/sqlalchemy/orm/events.py index 39659c72325..adff448f504 100644 --- a/lib/sqlalchemy/orm/events.py +++ b/lib/sqlalchemy/orm/events.py @@ -325,6 +325,23 @@ def init_failure(self, target, args, kwargs): """ + def _sa_event_merge_wo_load(self, target, context): + """receive an object instance after it was the subject of a merge() + call, when load=False was passed. + + The target would be the already-loaded object in the Session which + would have had its attributes overwritten by the incoming object. This + overwrite operation does not use attribute events, instead just + populating dict directly. Therefore the purpose of this event is so + that extensions like sqlalchemy.ext.mutable know that object state has + changed and incoming state needs to be set up for "parents" etc. + + This functionality is acceptable to be made public in a later release. + + .. 
versionadded:: 1.4.41 + + """ + def load(self, target, context): """Receive an object instance after it has been created via ``__new__``, and after initial attribute population has diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index c6a91693e30..96a273a3598 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -3151,6 +3151,9 @@ def _merge( if not load: # remove any history merged_state._commit_all(merged_dict, self.identity_map) + merged_state.manager.dispatch._sa_event_merge_wo_load( + merged_state, None + ) if new_instance: merged_state.manager.dispatch.load(merged_state, None) diff --git a/test/ext/test_mutable.py b/test/ext/test_mutable.py index ff167b25365..70b076c55ea 100644 --- a/test/ext/test_mutable.py +++ b/test/ext/test_mutable.py @@ -6,6 +6,7 @@ from sqlalchemy import func from sqlalchemy import inspect from sqlalchemy import Integer +from sqlalchemy import JSON from sqlalchemy import select from sqlalchemy import String from sqlalchemy import testing @@ -26,6 +27,7 @@ from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ +from sqlalchemy.testing import is_true from sqlalchemy.testing import mock from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.schema import Column @@ -160,6 +162,49 @@ def test_pickle_parent_multi_attrs(self, registry, connection, pickleit): b = ((), [data[other_attr]], ()) eq_(a, b) + @testing.combinations("key_present", "key_non_present", argnames="present") + @testing.combinations( + ("transient", True), + ("detached", True), + ("detached", False), + argnames="merge_subject, load", + ) + @testing.requires.json_type + def test_session_merge( + self, decl_base, connection, present, load, merge_subject + ): + """test #8446""" + + class Thing(decl_base): + __tablename__ = "thing" + id = Column(Integer, primary_key=True) + data = Column(MutableDict.as_mutable(JSON)) + + 
decl_base.metadata.create_all(connection) + + with Session(connection) as sess: + sess.add(Thing(id=1, data={"foo": "bar"})) + sess.commit() + + if merge_subject == "transient": + t1_to_merge = Thing(id=1, data={"foo": "bar"}) + elif merge_subject == "detached": + with Session(connection) as sess: + t1_to_merge = sess.get(Thing, 1) + + with Session(connection) as sess: + already_present = None + if present == "key_present": + already_present = sess.get(Thing, 1) + + t1_merged = sess.merge(t1_to_merge, load=load) + + t1_merged.data["foo"] = "bat" + if present == "key_present": + is_(t1_merged, already_present) + + is_true(inspect(t1_merged).attrs.data.history.added) + class _MutableDictTestBase(_MutableDictTestFixture): run_define_tables = "each" From 12547f963363eefbc9e22bc9b12243971551d89e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 30 Aug 2022 10:47:24 -0400 Subject: [PATCH 356/632] include TableClause.schema in cache key Fixed issue where use of the :func:`_sql.table` construct, passing a string for the :paramref:`_sql.table.schema` parameter, would fail to take the "schema" string into account when producing a cache key, thus leading to caching collisions if multiple, same-named :func:`_sql.table` constructs with different schemas were used. Fixes: #8441 Change-Id: Ic4b55b3e8ec53b4c88ba112691bdf60ea1d4c448 (cherry picked from commit 613642d9639f47ad11ab62a3fa71f6132edbaa0d) --- doc/build/changelog/unreleased_14/8441.rst | 10 ++++++++++ lib/sqlalchemy/sql/selectable.py | 1 + test/sql/test_compare.py | 2 ++ 3 files changed, 13 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/8441.rst diff --git a/doc/build/changelog/unreleased_14/8441.rst b/doc/build/changelog/unreleased_14/8441.rst new file mode 100644 index 00000000000..963850a109d --- /dev/null +++ b/doc/build/changelog/unreleased_14/8441.rst @@ -0,0 +1,10 @@ +.. 
change:: + :tags: bug, sql + :tickets: 8441 + + Fixed issue where use of the :func:`_sql.table` construct, passing a string + for the :paramref:`_sql.table.schema` parameter, would fail to take the + "schema" string into account when producing a cache key, thus leading to + caching collisions if multiple, same-named :func:`_sql.table` constructs + with different schemas were used. + diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index 8379e1ca735..95e13f0810d 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -2685,6 +2685,7 @@ class TableClause(roles.DMLTableRole, Immutable, FromClause): InternalTraversal.dp_fromclause_canonical_column_collection, ), ("name", InternalTraversal.dp_string), + ("schema", InternalTraversal.dp_string), ] named_with_column = True diff --git a/test/sql/test_compare.py b/test/sql/test_compare.py index 26340d21d45..f73e9864d37 100644 --- a/test/sql/test_compare.py +++ b/test/sql/test_compare.py @@ -266,6 +266,8 @@ class CoreFixtures(object): ), lambda: ( table("a", column("x"), column("y")), + table("a", column("x"), column("y"), schema="q"), + table("a", column("x"), column("y"), schema="y"), table("a", column("x"), column("y"))._annotate({"orm": True}), table("b", column("x"), column("y"))._annotate({"orm": True}), ), From af09ab014e0be51c702e407ceb9b122474140bc8 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Mon, 29 Aug 2022 20:57:01 +0200 Subject: [PATCH 357/632] restore test concurrency try 2 Change-Id: I54730f9683a1de3f1379ca8d2a1cab8c485e7bcc (cherry picked from commit d8e135a474bdc2147786ec8919facc8f6fdf84f6) --- test/ext/asyncio/test_engine_py3k.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/ext/asyncio/test_engine_py3k.py b/test/ext/asyncio/test_engine_py3k.py index 673eff2fb19..7875b9aec4c 100644 --- a/test/ext/asyncio/test_engine_py3k.py +++ b/test/ext/asyncio/test_engine_py3k.py @@ -1,3 +1,4 @@ +import asyncio import 
inspect as stdlib_inspect from sqlalchemy import Column @@ -487,8 +488,8 @@ async def test_dispose(self, async_engine): @async_test async def test_init_once_concurrency(self, async_engine): async with async_engine.connect() as c1, async_engine.connect() as c2: - eq_(await c1.scalar(select(1)), 1) - eq_(await c2.scalar(select(1)), 1) + coro = asyncio.gather(c1.scalar(select(1)), c2.scalar(select(2))) + eq_(await coro, [1, 2]) @async_test async def test_connect_ctxmanager(self, async_engine): From 7883009078f539cac92eabbd1f995939edaeaa44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20D=C6=B0=C6=A1ng?= Date: Tue, 30 Aug 2022 18:05:25 -0300 Subject: [PATCH 358/632] Fix doc snippet (#8414) (cherry picked from commit ec65def6bffa94d1c89ae5896e4d7e85f9abe84a) --- doc/build/core/defaults.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/core/defaults.rst b/doc/build/core/defaults.rst index 6c3d3ed7c7c..bccc8375c1d 100644 --- a/doc/build/core/defaults.rst +++ b/doc/build/core/defaults.rst @@ -658,7 +658,7 @@ shares most of its option to control the database behaviour with Example:: - from sqlalchemy import Table, Column, MetaData, Integer, Computed + from sqlalchemy import Table, Column, MetaData, Integer, Identity, String metadata_obj = MetaData() From ba2392409c8024367da71f819417f5f6c3a2a2e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9s=20=C3=81lvarez?= Date: Fri, 26 Aug 2022 10:55:12 -0600 Subject: [PATCH 359/632] Fix typo in 'ORM Querying Guide' docs (cherry picked from commit 087efa784d4ba08118dc9a34a765afd5174f4287) --- doc/build/orm/queryguide.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/orm/queryguide.rst b/doc/build/orm/queryguide.rst index 2a575354c60..8fa2adc0c82 100644 --- a/doc/build/orm/queryguide.rst +++ b/doc/build/orm/queryguide.rst @@ -381,7 +381,7 @@ Selecting Entities from Subqueries ----------------------------------- The :func:`_orm.aliased` construct discussed in the previous 
section -can be used with any :class:`_sql.Subuqery` construct that comes from a +can be used with any :class:`_sql.Subquery` construct that comes from a method such as :meth:`_sql.Select.subquery` to link ORM entities to the columns returned by that subquery; there must be a **column correspondence** relationship between the columns delivered by the subquery and the columns From 3c68c7c0341ac41b11185491cf2165336dfed1de Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 31 Aug 2022 11:07:23 -0400 Subject: [PATCH 360/632] run update_subclass anytime we add new clslevel dispatch Fixed event listening issue where event listeners added to a superclass would be lost if a subclass were created which then had its own listeners associated. The practical example is that of the :class:`.sessionmaker` class created after events have been associated with the :class:`_orm.Session` class. Fixes: #8467 Change-Id: I9bdba8769147e30110a09900d4a577e833ac3af9 (cherry picked from commit d3e0b8e750d864766148cdf1a658a601079eed46) --- doc/build/changelog/unreleased_14/8467.rst | 9 +++++ lib/sqlalchemy/event/attr.py | 45 +++++++++------------- test/base/test_events.py | 29 ++++++++++++++ test/orm/test_events.py | 29 ++++++++++++++ 4 files changed, 85 insertions(+), 27 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8467.rst diff --git a/doc/build/changelog/unreleased_14/8467.rst b/doc/build/changelog/unreleased_14/8467.rst new file mode 100644 index 00000000000..7626f50a394 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8467.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, events, orm + :tickets: 8467 + + Fixed event listening issue where event listeners added to a superclass + would be lost if a subclass were created which then had its own listeners + associated. The practical example is that of the :class:`.sessionmaker` + class created after events have been associated with the + :class:`_orm.Session` class. 
diff --git a/lib/sqlalchemy/event/attr.py b/lib/sqlalchemy/event/attr.py index 0d16165c4ee..09b5a2267f0 100644 --- a/lib/sqlalchemy/event/attr.py +++ b/lib/sqlalchemy/event/attr.py @@ -118,14 +118,14 @@ def wrap_kw(*args, **kw): return wrap_kw - def insert(self, event_key, propagate): + def _do_insert_or_append(self, event_key, is_append): target = event_key.dispatch_target assert isinstance( target, type ), "Class-level Event targets must be classes." if not getattr(target, "_sa_propagate_class_events", True): raise exc.InvalidRequestError( - "Can't assign an event directly to the %s class" % target + "Can't assign an event directly to the %s class" % (target,) ) for cls in util.walk_subclasses(target): @@ -133,38 +133,28 @@ def insert(self, event_key, propagate): self.update_subclass(cls) else: if cls not in self._clslevel: - self._assign_cls_collection(cls) - self._clslevel[cls].appendleft(event_key._listen_fn) + self.update_subclass(cls) + if is_append: + self._clslevel[cls].append(event_key._listen_fn) + else: + self._clslevel[cls].appendleft(event_key._listen_fn) registry._stored_in_collection(event_key, self) - def append(self, event_key, propagate): - target = event_key.dispatch_target - assert isinstance( - target, type - ), "Class-level Event targets must be classes." 
- if not getattr(target, "_sa_propagate_class_events", True): - raise exc.InvalidRequestError( - "Can't assign an event directly to the %s class" % target - ) - for cls in util.walk_subclasses(target): - if cls is not target and cls not in self._clslevel: - self.update_subclass(cls) - else: - if cls not in self._clslevel: - self._assign_cls_collection(cls) - self._clslevel[cls].append(event_key._listen_fn) - registry._stored_in_collection(event_key, self) + def insert(self, event_key, propagate): + self._do_insert_or_append(event_key, is_append=False) - def _assign_cls_collection(self, target): - if getattr(target, "_sa_propagate_class_events", True): - self._clslevel[target] = collections.deque() - else: - self._clslevel[target] = _empty_collection() + def append(self, event_key, propagate): + self._do_insert_or_append(event_key, is_append=True) def update_subclass(self, target): if target not in self._clslevel: - self._assign_cls_collection(target) + if getattr(target, "_sa_propagate_class_events", True): + self._clslevel[target] = collections.deque() + else: + self._clslevel[target] = _empty_collection() + clslevel = self._clslevel[target] + for cls in target.__mro__[1:]: if cls in self._clslevel: clslevel.extend( @@ -173,6 +163,7 @@ def update_subclass(self, target): def remove(self, event_key): target = event_key.dispatch_target + for cls in util.walk_subclasses(target): if cls in self._clslevel: self._clslevel[cls].remove(event_key._listen_fn) diff --git a/test/base/test_events.py b/test/base/test_events.py index 4409d6b2947..e8ed0ff3628 100644 --- a/test/base/test_events.py +++ b/test/base/test_events.py @@ -677,6 +677,35 @@ def handler2(x, y): eq_(len(SubTarget().dispatch.event_one), 2) + @testing.combinations(True, False, argnames="m1") + @testing.combinations(True, False, argnames="m2") + @testing.combinations(True, False, argnames="m3") + @testing.combinations(True, False, argnames="use_insert") + def test_subclass_gen_after_clslisten(self, m1, m2, m3, 
use_insert): + """test #8467""" + m1 = Mock() if m1 else None + m2 = Mock() if m2 else None + m3 = Mock() if m3 else None + + if m1: + event.listen(self.TargetOne, "event_one", m1, insert=use_insert) + + class SubTarget(self.TargetOne): + pass + + if m2: + event.listen(SubTarget, "event_one", m2, insert=use_insert) + + if m3: + event.listen(self.TargetOne, "event_one", m3, insert=use_insert) + + st = SubTarget() + st.dispatch.event_one() + + for m in m1, m2, m3: + if m: + eq_(m.mock_calls, [call()]) + def test_lis_multisub_lis(self): @event.listens_for(self.TargetOne, "event_one") def handler1(x, y): diff --git a/test/orm/test_events.py b/test/orm/test_events.py index 4009dc3aecb..50265510042 100644 --- a/test/orm/test_events.py +++ b/test/orm/test_events.py @@ -2078,6 +2078,35 @@ def my_listener(*arg, **kw): s = fixture_session() assert my_listener in s.dispatch.before_flush + @testing.combinations(True, False, argnames="m1") + @testing.combinations(True, False, argnames="m2") + @testing.combinations(True, False, argnames="m3") + @testing.combinations(True, False, argnames="use_insert") + def test_sessionmaker_gen_after_session_listen( + self, m1, m2, m3, use_insert + ): + m1 = Mock() if m1 else None + m2 = Mock() if m2 else None + m3 = Mock() if m3 else None + + if m1: + event.listen(Session, "before_flush", m1, insert=use_insert) + + factory = sessionmaker() + + if m2: + event.listen(factory, "before_flush", m2, insert=use_insert) + + if m3: + event.listen(factory, "before_flush", m3, insert=use_insert) + + st = factory() + st.dispatch.before_flush() + + for m in m1, m2, m3: + if m: + eq_(m.mock_calls, [call()]) + def test_sessionmaker_listen(self): """test that listen can be applied to individual scoped_session() classes.""" From 915d9686eb80f55491e2b480ddad7cdfbd612609 Mon Sep 17 00:00:00 2001 From: bkcsfi Date: Wed, 31 Aug 2022 16:42:47 -0400 Subject: [PATCH 361/632] fix minor typo subuqeries -> subqueries (cherry picked from commit 
41268fa6352c514ba2d5f0b3ecc16019a63547b6) --- doc/build/orm/queryguide.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/orm/queryguide.rst b/doc/build/orm/queryguide.rst index 8fa2adc0c82..9fcd2c1bc04 100644 --- a/doc/build/orm/queryguide.rst +++ b/doc/build/orm/queryguide.rst @@ -702,7 +702,7 @@ Joining to Subqueries ^^^^^^^^^^^^^^^^^^^^^^^ The target of a join may be any "selectable" entity which usefully includes -subuqeries. When using the ORM, it is typical +subqueries. When using the ORM, it is typical that these targets are stated in terms of an :func:`_orm.aliased` construct, but this is not strictly required particularly if the joined entity is not being returned in the results. For example, to join from the From 966b86f86460eacf4a227ba5727ab434cdef040a Mon Sep 17 00:00:00 2001 From: Gord Thompson Date: Thu, 1 Sep 2022 11:10:20 -0600 Subject: [PATCH 362/632] Fix Azure Synapse connection error Fixed regression caused by the fix for :ticket:`8231` released in 1.4.40 where connection would fail if the user does not have permission to query the dm_exec_sessions or dm_pdw_nodes_exec_sessions system view when trying to determine the current transaction isolation level. Fixes: #8475 Change-Id: Ie2bcda92f2ef2d12360ddda47eb6e896313c71f2 (cherry picked from commit 645977088404da0ed6d72ae7638a7d23dcf1e8e7) --- doc/build/changelog/unreleased_14/8475.rst | 8 ++++ lib/sqlalchemy/dialects/mssql/base.py | 49 ++++++++++++++-------- test/dialect/mssql/test_engine.py | 20 ++++++++- 3 files changed, 58 insertions(+), 19 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8475.rst diff --git a/doc/build/changelog/unreleased_14/8475.rst b/doc/build/changelog/unreleased_14/8475.rst new file mode 100644 index 00000000000..22fc3f2dd6b --- /dev/null +++ b/doc/build/changelog/unreleased_14/8475.rst @@ -0,0 +1,8 @@ +.. 
change:: + :tags: bug, mssql, regression + :tickets: 8475 + + Fixed regression caused by the fix for :ticket:`8231` released in 1.4.40 + where connection would fail if the user does not have permission to query + the dm_exec_sessions or dm_pdw_nodes_exec_sessions system view when trying + to determine the current transaction isolation level. diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index ee6ce87696d..0d0a4b8f5e0 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -2842,27 +2842,40 @@ def get_isolation_level(self, dbapi_connection): ) view_name = "sys.{}".format(row[0]) - cursor.execute( - """ - SELECT CASE transaction_isolation_level - WHEN 0 THEN NULL - WHEN 1 THEN 'READ UNCOMMITTED' - WHEN 2 THEN 'READ COMMITTED' - WHEN 3 THEN 'REPEATABLE READ' - WHEN 4 THEN 'SERIALIZABLE' - WHEN 5 THEN 'SNAPSHOT' END AS TRANSACTION_ISOLATION_LEVEL - FROM {} - where session_id = @@SPID - """.format( - view_name + + try: + cursor.execute( + """ + SELECT CASE transaction_isolation_level + WHEN 0 THEN NULL + WHEN 1 THEN 'READ UNCOMMITTED' + WHEN 2 THEN 'READ COMMITTED' + WHEN 3 THEN 'REPEATABLE READ' + WHEN 4 THEN 'SERIALIZABLE' + WHEN 5 THEN 'SNAPSHOT' END + AS TRANSACTION_ISOLATION_LEVEL + FROM {} + where session_id = @@SPID + """.format( + view_name + ) ) - ) - row = cursor.fetchone() - assert row is not None - val = row[0] + except self.dbapi.Error as err: + util.raise_( + NotImplementedError( + "Can't fetch isolation level; encountered " + "error {} when " + 'attempting to query the "{}" view.'.format( + err, view_name + ) + ), + from_=err, + ) + else: + row = cursor.fetchone() + return row[0].upper() finally: cursor.close() - return val.upper() def initialize(self, connection): super(MSDialect, self).initialize(connection) diff --git a/test/dialect/mssql/test_engine.py b/test/dialect/mssql/test_engine.py index af8db861611..ea201a1aa0b 100644 --- a/test/dialect/mssql/test_engine.py 
+++ b/test/dialect/mssql/test_engine.py @@ -649,7 +649,7 @@ def test_isolation_level(self, metadata): class IsolationLevelDetectTest(fixtures.TestBase): - def _fixture(self, view_result): + def _fixture(self, view_result, simulate_perm_failure=False): class Error(Exception): pass @@ -672,6 +672,10 @@ def fail_on_exec( stmt, re.S, ): + if simulate_perm_failure: + raise dialect.dbapi.Error( + "SQL Server simulated permission error" + ) result.append(("SERIALIZABLE",)) else: assert False @@ -707,6 +711,20 @@ def test_not_supported(self): connection, ) + def test_dont_have_table_perms(self): + dialect, connection = self._fixture( + "dm_pdw_nodes_exec_sessions", simulate_perm_failure=True + ) + + assert_raises_message( + NotImplementedError, + r"Can\'t fetch isolation level; encountered error SQL Server " + r"simulated permission error when attempting to query the " + r'"sys.dm_pdw_nodes_exec_sessions" view.', + dialect.get_isolation_level, + connection, + ) + class InvalidTransactionFalsePositiveTest(fixtures.TablesTest): __only_on__ = "mssql" From 1ce530c0b25bc8a7d09b97bf300aba55d2603892 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 6 Sep 2022 20:49:15 -0400 Subject: [PATCH 363/632] changelog fixes Change-Id: Ie0217dfea32d08abf7934950b1e1b381e612bb56 --- doc/build/changelog/unreleased_14/8446.rst | 2 +- doc/build/changelog/unreleased_14/8456.rst | 4 ++-- doc/build/changelog/unreleased_14/8475.rst | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/build/changelog/unreleased_14/8446.rst b/doc/build/changelog/unreleased_14/8446.rst index 9f4cdfddd6c..7366315a936 100644 --- a/doc/build/changelog/unreleased_14/8446.rst +++ b/doc/build/changelog/unreleased_14/8446.rst @@ -1,5 +1,5 @@ .. 
change:: - :tags: bug, ext + :tags: bug, orm :tickets: 8446 Fixed issue in :mod:`sqlalchemy.ext.mutable` extension where collection diff --git a/doc/build/changelog/unreleased_14/8456.rst b/doc/build/changelog/unreleased_14/8456.rst index ca769fd3424..6f0e846510c 100644 --- a/doc/build/changelog/unreleased_14/8456.rst +++ b/doc/build/changelog/unreleased_14/8456.rst @@ -6,7 +6,7 @@ query placed as a subquery within an enclosing query for that same entity would fail to render the JOIN correctly for the inner query. The issue manifested in two different ways prior and subsequent to version 1.4.18 - (related issue #6595), in one case rendering JOIN twice, in the other - losing the JOIN entirely. To resolve, the conditions under which + (related issue :ticket:`6595`), in one case rendering JOIN twice, in the + other losing the JOIN entirely. To resolve, the conditions under which "polymorphic loading" are applied have been scaled back to not be invoked for simple joined inheritance queries. diff --git a/doc/build/changelog/unreleased_14/8475.rst b/doc/build/changelog/unreleased_14/8475.rst index 22fc3f2dd6b..bec64c6a4ab 100644 --- a/doc/build/changelog/unreleased_14/8475.rst +++ b/doc/build/changelog/unreleased_14/8475.rst @@ -3,6 +3,6 @@ :tickets: 8475 Fixed regression caused by the fix for :ticket:`8231` released in 1.4.40 - where connection would fail if the user does not have permission to query - the dm_exec_sessions or dm_pdw_nodes_exec_sessions system view when trying - to determine the current transaction isolation level. + where connection would fail if the user did not have permission to query + the ``dm_exec_sessions`` or ``dm_pdw_nodes_exec_sessions`` system views + when trying to determine the current transaction isolation level. 
From ac9a74ac0d60f5e95d9dbbce7f804f532bef2bea Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 6 Sep 2022 20:53:28 -0400 Subject: [PATCH 364/632] - 1.4.41 --- doc/build/changelog/changelog_14.rst | 86 +++++++++++++++++++++- doc/build/changelog/unreleased_14/8399.rst | 10 --- doc/build/changelog/unreleased_14/8401.rst | 9 --- doc/build/changelog/unreleased_14/8419.rst | 10 --- doc/build/changelog/unreleased_14/8441.rst | 10 --- doc/build/changelog/unreleased_14/8446.rst | 8 -- doc/build/changelog/unreleased_14/8456.rst | 12 --- doc/build/changelog/unreleased_14/8467.rst | 9 --- doc/build/changelog/unreleased_14/8475.rst | 8 -- doc/build/conf.py | 4 +- 10 files changed, 87 insertions(+), 79 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/8399.rst delete mode 100644 doc/build/changelog/unreleased_14/8401.rst delete mode 100644 doc/build/changelog/unreleased_14/8419.rst delete mode 100644 doc/build/changelog/unreleased_14/8441.rst delete mode 100644 doc/build/changelog/unreleased_14/8446.rst delete mode 100644 doc/build/changelog/unreleased_14/8456.rst delete mode 100644 doc/build/changelog/unreleased_14/8467.rst delete mode 100644 doc/build/changelog/unreleased_14/8475.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 68cf42463d2..ce7819a099d 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,91 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.41 - :include_notes_from: unreleased_14 + :released: September 6, 2022 + + .. 
change:: + :tags: bug, sql + :tickets: 8441 + + Fixed issue where use of the :func:`_sql.table` construct, passing a string + for the :paramref:`_sql.table.schema` parameter, would fail to take the + "schema" string into account when producing a cache key, thus leading to + caching collisions if multiple, same-named :func:`_sql.table` constructs + with different schemas were used. + + + .. change:: + :tags: bug, events, orm + :tickets: 8467 + + Fixed event listening issue where event listeners added to a superclass + would be lost if a subclass were created which then had its own listeners + associated. The practical example is that of the :class:`.sessionmaker` + class created after events have been associated with the + :class:`_orm.Session` class. + + .. change:: + :tags: orm, bug + :tickets: 8401 + + Hardened the cache key strategy for the :func:`_orm.aliased` and + :func:`_orm.with_polymorphic` constructs. While no issue involving actual + statements being cached can easily be demonstrated (if at all), these two + constructs were not including enough of what makes them unique in their + cache keys for caching on the aliased construct alone to be accurate. + + .. change:: + :tags: bug, orm, regression + :tickets: 8456 + + Fixed regression appearing in the 1.4 series where a joined-inheritance + query placed as a subquery within an enclosing query for that same entity + would fail to render the JOIN correctly for the inner query. The issue + manifested in two different ways prior and subsequent to version 1.4.18 + (related issue :ticket:`6595`), in one case rendering JOIN twice, in the + other losing the JOIN entirely. To resolve, the conditions under which + "polymorphic loading" are applied have been scaled back to not be invoked + for simple joined inheritance queries. + + .. 
change:: + :tags: bug, orm + :tickets: 8446 + + Fixed issue in :mod:`sqlalchemy.ext.mutable` extension where collection + links to the parent object would be lost if the object were merged with + :meth:`.Session.merge` while also passing :paramref:`.Session.merge.load` + as False. + + .. change:: + :tags: bug, orm + :tickets: 8399 + + Fixed issue involving :func:`_orm.with_loader_criteria` where a closure + variable used as bound parameter value within the lambda would not carry + forward correctly into additional relationship loaders such as + :func:`_orm.selectinload` and :func:`_orm.lazyload` after the statement + were cached, using the stale originally-cached value instead. + + + .. change:: + :tags: bug, mssql, regression + :tickets: 8475 + + Fixed regression caused by the fix for :ticket:`8231` released in 1.4.40 + where connection would fail if the user did not have permission to query + the ``dm_exec_sessions`` or ``dm_pdw_nodes_exec_sessions`` system views + when trying to determine the current transaction isolation level. + + .. change:: + :tags: bug, asyncio + :tickets: 8419 + + Integrated support for asyncpg's ``terminate()`` method call for cases + where the connection pool is recycling a possibly timed-out connection, + where a connection is being garbage collected that wasn't gracefully + closed, as well as when the connection has been invalidated. This allows + asyncpg to abandon the connection without waiting for a response that may + incur long timeouts. .. changelog:: :version: 1.4.40 diff --git a/doc/build/changelog/unreleased_14/8399.rst b/doc/build/changelog/unreleased_14/8399.rst deleted file mode 100644 index aea9e523816..00000000000 --- a/doc/build/changelog/unreleased_14/8399.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. 
change:: - :tags: bug, orm - :tickets: 8399 - - Fixed issue involving :func:`_orm.with_loader_criteria` where a closure - variable used as bound parameter value within the lambda would not carry - forward correctly into additional relationship loaders such as - :func:`_orm.selectinload` and :func:`_orm.lazyload` after the statement - were cached, using the stale originally-cached value instead. - diff --git a/doc/build/changelog/unreleased_14/8401.rst b/doc/build/changelog/unreleased_14/8401.rst deleted file mode 100644 index 119c6cff1a0..00000000000 --- a/doc/build/changelog/unreleased_14/8401.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: orm, bug - :tickets: 8401 - - Hardened the cache key strategy for the :func:`_orm.aliased` and - :func:`_orm.with_polymorphic` constructs. While no issue involving actual - statements being cached can easily be demonstrated (if at all), these two - constructs were not including enough of what makes them unique in their - cache keys for caching on the aliased construct alone to be accurate. diff --git a/doc/build/changelog/unreleased_14/8419.rst b/doc/build/changelog/unreleased_14/8419.rst deleted file mode 100644 index a095d858d2e..00000000000 --- a/doc/build/changelog/unreleased_14/8419.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. change:: - :tags: bug, asyncio - :tickets: 8419 - - Integrated support for asyncpg's ``terminate()`` method call for cases - where the connection pool is recycling a possibly timed-out connection, - where a connection is being garbage collected that wasn't gracefully - closed, as well as when the connection has been invalidated. This allows - asyncpg to abandon the connection without waiting for a response that may - incur long timeouts. diff --git a/doc/build/changelog/unreleased_14/8441.rst b/doc/build/changelog/unreleased_14/8441.rst deleted file mode 100644 index 963850a109d..00000000000 --- a/doc/build/changelog/unreleased_14/8441.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. 
change:: - :tags: bug, sql - :tickets: 8441 - - Fixed issue where use of the :func:`_sql.table` construct, passing a string - for the :paramref:`_sql.table.schema` parameter, would fail to take the - "schema" string into account when producing a cache key, thus leading to - caching collisions if multiple, same-named :func:`_sql.table` constructs - with different schemas were used. - diff --git a/doc/build/changelog/unreleased_14/8446.rst b/doc/build/changelog/unreleased_14/8446.rst deleted file mode 100644 index 7366315a936..00000000000 --- a/doc/build/changelog/unreleased_14/8446.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 8446 - - Fixed issue in :mod:`sqlalchemy.ext.mutable` extension where collection - links to the parent object would be lost if the object were merged with - :meth:`.Session.merge` while also passing :paramref:`.Session.merge.load` - as False. diff --git a/doc/build/changelog/unreleased_14/8456.rst b/doc/build/changelog/unreleased_14/8456.rst deleted file mode 100644 index 6f0e846510c..00000000000 --- a/doc/build/changelog/unreleased_14/8456.rst +++ /dev/null @@ -1,12 +0,0 @@ -.. change:: - :tags: bug, orm, regression - :tickets: 8456 - - Fixed regression appearing in the 1.4 series where a joined-inheritance - query placed as a subquery within an enclosing query for that same entity - would fail to render the JOIN correctly for the inner query. The issue - manifested in two different ways prior and subsequent to version 1.4.18 - (related issue :ticket:`6595`), in one case rendering JOIN twice, in the - other losing the JOIN entirely. To resolve, the conditions under which - "polymorphic loading" are applied have been scaled back to not be invoked - for simple joined inheritance queries. 
diff --git a/doc/build/changelog/unreleased_14/8467.rst b/doc/build/changelog/unreleased_14/8467.rst deleted file mode 100644 index 7626f50a394..00000000000 --- a/doc/build/changelog/unreleased_14/8467.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, events, orm - :tickets: 8467 - - Fixed event listening issue where event listeners added to a superclass - would be lost if a subclass were created which then had its own listeners - associated. The practical example is that of the :class:`.sessionmaker` - class created after events have been associated with the - :class:`_orm.Session` class. diff --git a/doc/build/changelog/unreleased_14/8475.rst b/doc/build/changelog/unreleased_14/8475.rst deleted file mode 100644 index bec64c6a4ab..00000000000 --- a/doc/build/changelog/unreleased_14/8475.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, mssql, regression - :tickets: 8475 - - Fixed regression caused by the fix for :ticket:`8231` released in 1.4.40 - where connection would fail if the user did not have permission to query - the ``dm_exec_sessions`` or ``dm_pdw_nodes_exec_sessions`` system views - when trying to determine the current transaction isolation level. diff --git a/doc/build/conf.py b/doc/build/conf.py index 33ee77319ed..3a236ffbc7f 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -213,9 +213,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. 
-release = "1.4.40" +release = "1.4.41" -release_date = "August 8, 2022" +release_date = "September 6, 2022" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From 147b66c82150549562b8d6a8e1146a75e5723b5c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 6 Sep 2022 20:58:37 -0400 Subject: [PATCH 365/632] Version 1.4.42 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index ce7819a099d..060d964a148 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.42 + :include_notes_from: unreleased_14 + .. changelog:: :version: 1.4.41 :released: September 6, 2022 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index 665c0491197..772a60d75ec 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.41" +__version__ = "1.4.42" def __go(lcls): From e580d217adfb9617801f521413fb00a9f28e631d Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Wed, 7 Sep 2022 22:08:55 +0200 Subject: [PATCH 366/632] update workflows Change-Id: Iaec865386bb3e969efec3ac75dc27ead288eca5d --- .github/workflows/create-wheels.yaml | 27 ++++++++++++++------------- .github/workflows/run-on-pr.yaml | 14 +++++++------- .github/workflows/run-test.yaml | 14 +++++++------- 3 files changed, 28 insertions(+), 27 deletions(-) diff --git a/.github/workflows/create-wheels.yaml b/.github/workflows/create-wheels.yaml index 06999b60608..8353c0ec94b 100644 --- a/.github/workflows/create-wheels.yaml +++ b/.github/workflows/create-wheels.yaml @@ -46,10 +46,10 @@ jobs: steps: - name: Checkout repo - 
uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} @@ -106,7 +106,7 @@ jobs: - name: Set up Python for twine # twine on py2 is very old and is no longer updated, so we change to python 3.8 before upload - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: "3.8" @@ -152,7 +152,7 @@ jobs: steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Get python version id: linux-py-version @@ -162,6 +162,7 @@ jobs: # this is from https://github.community/t5/GitHub-Actions/Using-the-output-of-run-inside-of-if-condition/td-p/33920 run: | version="`echo $py_tag | sed --regexp-extended 's/cp([0-9])([0-9]+)-.*/\1.\2/g'`" + version=$([[ $version = "3.11" ]] && echo 3.11.0-rc - 3.11 || echo $version ) echo $version echo "::set-output name=python-version::$version" @@ -179,11 +180,11 @@ jobs: (cat setup.cfg) | %{$_ -replace "tag_build.?=.?dev",""} | set-content setup.cfg - name: Create wheel for manylinux1 and manylinux2010 for py3 - if: ${{ matrix.python-version != 'cp27-cp27m' && matrix.python-version != 'cp27-cp27mu' }} + if: ${{ matrix.python-version != 'cp27-cp27m' && matrix.python-version != 'cp27-cp27mu' && matrix.python-version != 'cp311-cp311' }} # this step uses the image provided by pypa here https://github.com/pypa/manylinux to generate the wheels on linux # the action uses the image for manylinux2010 but can generate also a manylinux1 wheel # change the tag of this image to change the image used - uses: RalfG/python-wheels-manylinux-build@v0.3.4-manylinux2010_x86_64 + uses: RalfG/python-wheels-manylinux-build@v0.5.0-manylinux2010_x86_64 # this action generates 3 wheels in dist/. linux, manylinux1 and manylinux2010 with: # python-versions is the output of the previous step and is in the form -. 
Eg cp27-cp27mu @@ -199,7 +200,7 @@ jobs: # this step uses the image provided by pypa here https://github.com/pypa/manylinux to generate the wheels on linux # the action uses the image for manylinux2010 but can generate also a manylinux1 wheel # change the tag of this image to change the image used - uses: RalfG/python-wheels-manylinux-build@v0.3.4-manylinux2014_x86_64 + uses: RalfG/python-wheels-manylinux-build@v0.5.0-manylinux2014_x86_64 # this action generates 2 wheels in dist/. linux and manylinux2014 with: # python-versions is the output of the previous step and is in the form -. Eg cp27-cp27mu @@ -215,7 +216,7 @@ jobs: # this step uses the image provided by pypa here https://github.com/pypa/manylinux to generate the wheels on linux # the action uses the image for manylinux2010 but can generate also a manylinux1 wheel # change the tag of this image to change the image used - uses: RalfG/python-wheels-manylinux-build@v0.3.4-manylinux1_x86_64 + uses: RalfG/python-wheels-manylinux-build@v0.5.0-manylinux1_x86_64 # this action generates 2 wheels in dist/. linux and manylinux1 with: # python-versions is the output of the previous step and is in the form -. Eg cp27-cp27mu @@ -227,7 +228,7 @@ jobs: pip-wheel-args: "-w ./dist --no-use-pep517 -v --no-deps" - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ steps.linux-py-version.outputs.python-version }} architecture: ${{ matrix.architecture }} @@ -260,7 +261,7 @@ jobs: - name: Set up Python for twine # twine on py2 is very old and is no longer updated, so we change to python 3.8 before upload - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: "3.8" @@ -299,7 +300,7 @@ jobs: steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Remove tag_build from setup.cfg # sqlalchemy has `tag_build` set to `dev` in setup.cfg. 
We need to remove it before creating the weel @@ -322,7 +323,7 @@ jobs: # this step uses the image provided by pypa here https://github.com/pypa/manylinux to generate the wheels on linux # the action uses the image for manylinux2014 but can generate also a manylinux1 wheel # change the tag of this image to change the image used - uses: RalfG/python-wheels-manylinux-build@v0.3.4-manylinux2014_aarch64 + uses: RalfG/python-wheels-manylinux-build@v0.5.0-manylinux2014_aarch64 # this action generates 2 wheels in dist/. linux and manylinux2014 with: # python-versions is the output of the previous step and is in the form -. Eg cp37-cp37mu @@ -358,7 +359,7 @@ jobs: - name: Set up Python for twine # Setup python after creating the wheel, otherwise LD_LIBRARY_PATH gets set and it will break wheel generation # twine on py2 is very old and is no longer updated, so we change to python 3.8 before upload - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: "3.8" diff --git a/.github/workflows/run-on-pr.yaml b/.github/workflows/run-on-pr.yaml index 352eec3abbf..087f1bc3320 100644 --- a/.github/workflows/run-on-pr.yaml +++ b/.github/workflows/run-on-pr.yaml @@ -38,10 +38,10 @@ jobs: # steps to run in each job. Some are github actions, others run shell commands steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} @@ -71,10 +71,10 @@ jobs: # steps to run in each job. Some are github actions, others run shell commands steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} @@ -104,10 +104,10 @@ jobs: # steps to run in each job. 
Some are github actions, others run shell commands steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} @@ -136,7 +136,7 @@ jobs: # steps: # - name: Checkout repo - # uses: actions/checkout@v2 + # uses: actions/checkout@v3 # - name: Set up emulation # run: | diff --git a/.github/workflows/run-test.yaml b/.github/workflows/run-test.yaml index fd77e3987ff..1c97f64bc1d 100644 --- a/.github/workflows/run-test.yaml +++ b/.github/workflows/run-test.yaml @@ -78,10 +78,10 @@ jobs: # steps to run in each job. Some are github actions, others run shell commands steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} @@ -115,7 +115,7 @@ jobs: steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up emulation run: | @@ -154,10 +154,10 @@ jobs: # steps to run in each job. Some are github actions, others run shell commands steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} @@ -187,10 +187,10 @@ jobs: # steps to run in each job. 
Some are github actions, others run shell commands steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Set up python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} architecture: ${{ matrix.architecture }} From 3b8bcbcac66d814d15ec44c86c30d9c48ec40c27 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 7 Sep 2022 19:00:31 -0400 Subject: [PATCH 367/632] add docs for session.get() also use the term "primary key" a bit more Change-Id: Ib654b30a9d06a2aeed019b4754db920afe05d774 References: https://twitter.com/encthenet/status/1567644850471989248 (cherry picked from commit cc72081b0c32dbd089fb9601747f448b65414640) --- doc/build/orm/session_basics.rst | 31 ++++++++++++++++++-- doc/build/tutorial/orm_data_manipulation.rst | 4 +-- 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/doc/build/orm/session_basics.rst b/doc/build/orm/session_basics.rst index b747246c042..2815492dd50 100644 --- a/doc/build/orm/session_basics.rst +++ b/doc/build/orm/session_basics.rst @@ -500,6 +500,29 @@ so that the overall nesting pattern of so-called "subtransactions" is consistently maintained. The FAQ section :ref:`faq_session_rollback` contains a more detailed description of this behavior. +.. _session_get: + +Get by Primary Key +------------------ + +As the :class:`_orm.Session` makes use of an :term:`identity map` which refers +to current in-memory objects by primary key, the :meth:`_orm.Session.get` +method is provided as a means of locating objects by primary key, first +looking within the current identity map and then querying the database +for non present values. 
Such as, to locate a ``User`` entity with primary key +identity ``(5, )``:: + + my_user = session.get(User, 5) + +The :meth:`_orm.Session.get` also includes calling forms for composite primary +key values, which may be passed as tuples or dictionaries, as well as +additional parameters which allow for specific loader and execution options. +See :meth:`_orm.Session.get` for the complete parameter list. + +.. seealso:: + + :meth:`_orm.Session.get` + .. _session_expiring: Expiring / Refreshing @@ -552,9 +575,11 @@ ways to refresh its contents with new data from the current transaction: .. -* **the populate_existing() method** - this method is actually on the - :class:`_orm.Query` object as :meth:`_orm.Query.populate_existing` - and indicates that it should return objects that are unconditionally +* **the populate_existing() method or execution option** - This is now + an execution option documented at :ref:`orm_queryguide_populate_existing`; in + legacy form it's found on the :class:`_orm.Query` object as the + :meth:`_orm.Query.populate_existing` method. This operation in either form + indicates that objects being returned from a query should be unconditionally re-populated from their contents in the database:: u2 = session.query(User).populate_existing().filter(id=5).first() diff --git a/doc/build/tutorial/orm_data_manipulation.rst b/doc/build/tutorial/orm_data_manipulation.rst index ca955d02377..b0b67f53c4b 100644 --- a/doc/build/tutorial/orm_data_manipulation.rst +++ b/doc/build/tutorial/orm_data_manipulation.rst @@ -173,8 +173,8 @@ the ``id`` attribute:: INSERT many rows at once while still being able to retrieve the primary key values. 
-Identity Map -^^^^^^^^^^^^ +Getting Objects by Primary Key from the Identity Map +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The primary key identity of the objects are significant to the :class:`_orm.Session`, as the objects are now linked to this identity in memory using a feature From 647aafc875641d4d2e1ad2953affb0eec52e71dc Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 14 Sep 2022 23:09:08 -0400 Subject: [PATCH 368/632] add missing doc for mariadb-connector this module was never indexed. kind of a major oversight Change-Id: I4389a2ca3900edc70130fbae66195605a5704362 (cherry picked from commit 310dd2e6a741c606e5be40ef35cac6ed63e10bfe) --- doc/build/dialects/mysql.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/build/dialects/mysql.rst b/doc/build/dialects/mysql.rst index 64a6f45f968..c506a5fa43b 100644 --- a/doc/build/dialects/mysql.rst +++ b/doc/build/dialects/mysql.rst @@ -194,6 +194,11 @@ PyMySQL .. automodule:: sqlalchemy.dialects.mysql.pymysql +MariaDB-Connector +------------------ + +.. automodule:: sqlalchemy.dialects.mysql.mariadbconnector + MySQL-Connector --------------- From 8f1d47df752db35009269b7edd6da59805230c3b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 15 Sep 2022 08:42:34 -0400 Subject: [PATCH 369/632] catch exception for system_views also Fixed yet another regression in SQL Server isolation level fetch (see :ticket:`8231`, :ticket:`8475`), this time with "Microsoft Dynamics CRM Database via Azure Active Directory", which apparently lacks the ``system_views`` view entirely. Error catching has been extended that under no circumstances will this method ever fail, provided database connectivity is present. 
Fixes: #8525 Change-Id: I76a429e3329926069a0367d2e77ca1124b9a059d (cherry picked from commit 0ee7d693b805c0f1aea0da5ebc11ea6e52b42c71) --- doc/build/changelog/unreleased_14/8525.rst | 10 ++++ lib/sqlalchemy/dialects/mssql/base.py | 63 +++++++++++----------- test/dialect/mssql/test_engine.py | 33 ++++++++++-- 3 files changed, 71 insertions(+), 35 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8525.rst diff --git a/doc/build/changelog/unreleased_14/8525.rst b/doc/build/changelog/unreleased_14/8525.rst new file mode 100644 index 00000000000..3031ec378c5 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8525.rst @@ -0,0 +1,10 @@ +.. change:: + :tags: bug, mssql, regression + :tickets: 8525 + + Fixed yet another regression in SQL Server isolation level fetch (see + :ticket:`8231`, :ticket:`8475`), this time with "Microsoft Dynamics CRM + Database via Azure Active Directory", which apparently lacks the + ``system_views`` view entirely. Error catching has been extended that under + no circumstances will this method ever fail, provided database connectivity + is present. 
\ No newline at end of file diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index 0d0a4b8f5e0..0c967b51670 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -2829,10 +2829,13 @@ def set_isolation_level(self, connection, level): def get_isolation_level(self, dbapi_connection): cursor = dbapi_connection.cursor() + view_name = "sys.system_views" try: cursor.execute( - "SELECT name FROM sys.system_views WHERE name IN " - "('dm_exec_sessions', 'dm_pdw_nodes_exec_sessions')" + ( + "SELECT name FROM {} WHERE name IN " + "('dm_exec_sessions', 'dm_pdw_nodes_exec_sessions')" + ).format(view_name) ) row = cursor.fetchone() if not row: @@ -2843,37 +2846,33 @@ def get_isolation_level(self, dbapi_connection): view_name = "sys.{}".format(row[0]) - try: - cursor.execute( - """ - SELECT CASE transaction_isolation_level - WHEN 0 THEN NULL - WHEN 1 THEN 'READ UNCOMMITTED' - WHEN 2 THEN 'READ COMMITTED' - WHEN 3 THEN 'REPEATABLE READ' - WHEN 4 THEN 'SERIALIZABLE' - WHEN 5 THEN 'SNAPSHOT' END - AS TRANSACTION_ISOLATION_LEVEL - FROM {} - where session_id = @@SPID - """.format( - view_name - ) - ) - except self.dbapi.Error as err: - util.raise_( - NotImplementedError( - "Can't fetch isolation level; encountered " - "error {} when " - 'attempting to query the "{}" view.'.format( - err, view_name - ) - ), - from_=err, + cursor.execute( + """ + SELECT CASE transaction_isolation_level + WHEN 0 THEN NULL + WHEN 1 THEN 'READ UNCOMMITTED' + WHEN 2 THEN 'READ COMMITTED' + WHEN 3 THEN 'REPEATABLE READ' + WHEN 4 THEN 'SERIALIZABLE' + WHEN 5 THEN 'SNAPSHOT' END + AS TRANSACTION_ISOLATION_LEVEL + FROM {} + where session_id = @@SPID + """.format( + view_name ) - else: - row = cursor.fetchone() - return row[0].upper() + ) + except self.dbapi.Error as err: + util.raise_( + NotImplementedError( + "Can't fetch isolation level; encountered error {} when " + 'attempting to query the "{}" view.'.format(err, view_name) 
+ ), + from_=err, + ) + else: + row = cursor.fetchone() + return row[0].upper() finally: cursor.close() diff --git a/test/dialect/mssql/test_engine.py b/test/dialect/mssql/test_engine.py index ea201a1aa0b..32068e504b7 100644 --- a/test/dialect/mssql/test_engine.py +++ b/test/dialect/mssql/test_engine.py @@ -649,7 +649,12 @@ def test_isolation_level(self, metadata): class IsolationLevelDetectTest(fixtures.TestBase): - def _fixture(self, view_result, simulate_perm_failure=False): + def _fixture( + self, + view_result, + simulate_perm_failure=False, + simulate_no_system_views=False, + ): class Error(Exception): pass @@ -664,8 +669,13 @@ def fail_on_exec( ): result[:] = [] if "SELECT name FROM sys.system_views" in stmt: - if view_result: - result.append((view_result,)) + if simulate_no_system_views: + raise dialect.dbapi.Error( + "SQL Server simulated no system_views error" + ) + else: + if view_result: + result.append((view_result,)) elif re.match( ".*SELECT CASE transaction_isolation_level.*FROM sys.%s" % (view_result,), @@ -711,6 +721,23 @@ def test_not_supported(self): connection, ) + @testing.combinations(True, False) + def test_no_system_views(self, simulate_perm_failure_also): + dialect, connection = self._fixture( + "dm_pdw_nodes_exec_sessions", + simulate_perm_failure=simulate_perm_failure_also, + simulate_no_system_views=True, + ) + + assert_raises_message( + NotImplementedError, + r"Can\'t fetch isolation level; encountered error SQL Server " + r"simulated no system_views error when attempting to query the " + r'"sys.system_views" view.', + dialect.get_isolation_level, + connection, + ) + def test_dont_have_table_perms(self): dialect, connection = self._fixture( "dm_pdw_nodes_exec_sessions", simulate_perm_failure=True From 7c63437219aa361c50ef2de4b0e34bc670e87243 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 13 Sep 2022 11:00:46 -0400 Subject: [PATCH 370/632] Add type awareness to evaluator Fixed regression where using ORM update() with 
synchronize_session='fetch' would fail due to the use of evaluators that are now used to determine the in-Python value for expressions in the the SET clause when refreshing objects; if the evaluators make use of math operators against non-numeric values such as PostgreSQL JSONB, the non-evaluable condition would fail to be detected correctly. The evaluator now limits the use of math mutation operators to numeric types only, with the exception of "+" that continues to work for strings as well. SQLAlchemy 2.0 may alter this further by fetching the SET values completely rather than using evaluation. For 1.4 this also adds "concat_op" as evaluable; 2.0 already has more string operator support Fixes: #8507 Change-Id: Icf7120ccbf4266499df6bb3e05159c9f50971d69 (cherry picked from commit 4ab1bc641c7d5833cf20d8ab9b38f5bfba37cfdd) --- doc/build/changelog/unreleased_14/8507.rst | 13 ++++ lib/sqlalchemy/orm/evaluator.py | 48 +++++++++++-- test/orm/test_evaluator.py | 83 ++++++++++++++++++++++ 3 files changed, 137 insertions(+), 7 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8507.rst diff --git a/doc/build/changelog/unreleased_14/8507.rst b/doc/build/changelog/unreleased_14/8507.rst new file mode 100644 index 00000000000..07944da75da --- /dev/null +++ b/doc/build/changelog/unreleased_14/8507.rst @@ -0,0 +1,13 @@ +.. change:: + :tags: bug, orm, regression + :tickets: 8507 + + Fixed regression where using ORM update() with synchronize_session='fetch' + would fail due to the use of evaluators that are now used to determine the + in-Python value for expressions in the the SET clause when refreshing + objects; if the evaluators make use of math operators against non-numeric + values such as PostgreSQL JSONB, the non-evaluable condition would fail to + be detected correctly. The evaluator now limits the use of math mutation + operators to numeric types only, with the exception of "+" that continues + to work for strings as well. 
SQLAlchemy 2.0 may alter this further by + fetching the SET values completely rather than using evaluation. diff --git a/lib/sqlalchemy/orm/evaluator.py b/lib/sqlalchemy/orm/evaluator.py index dbbfba09f01..f1d9ca5413d 100644 --- a/lib/sqlalchemy/orm/evaluator.py +++ b/lib/sqlalchemy/orm/evaluator.py @@ -11,6 +11,8 @@ from .. import util from ..sql import and_ from ..sql import operators +from ..sql.sqltypes import Integer +from ..sql.sqltypes import Numeric class UnevaluatableError(Exception): @@ -30,12 +32,6 @@ def reverse_operate(self, *arg, **kw): _straight_ops = set( getattr(operators, op) for op in ( - "add", - "mul", - "sub", - "div", - "mod", - "truediv", "lt", "le", "ne", @@ -45,6 +41,18 @@ def reverse_operate(self, *arg, **kw): ) ) +_math_only_straight_ops = set( + getattr(operators, op) + for op in ( + "add", + "mul", + "sub", + "div", + "mod", + "truediv", + ) +) + _extended_ops = { operators.in_op: (lambda a, b: a in b if a is not _NO_OBJECT else None), operators.not_in_op: ( @@ -62,7 +70,6 @@ def reverse_operate(self, *arg, **kw): "startswith_op", "between_op", "endswith_op", - "concat_op", ) ) @@ -191,6 +198,11 @@ def evaluate(obj): def evaluate(obj): return eval_left(obj) != eval_right(obj) + elif operator is operators.concat_op: + + def evaluate(obj): + return eval_left(obj) + eval_right(obj) + elif operator in _extended_ops: def evaluate(obj): @@ -201,6 +213,28 @@ def evaluate(obj): return _extended_ops[operator](left_val, right_val) + elif operator in _math_only_straight_ops: + if ( + clause.left.type._type_affinity + not in ( + Numeric, + Integer, + ) + or clause.right.type._type_affinity not in (Numeric, Integer) + ): + raise UnevaluatableError( + 'Cannot evaluate math operator "%s" for ' + "datatypes %s, %s" + % (operator.__name__, clause.left.type, clause.right.type) + ) + + def evaluate(obj): + left_val = eval_left(obj) + right_val = eval_right(obj) + if left_val is None or right_val is None: + return None + return operator(eval_left(obj), 
eval_right(obj)) + elif operator in _straight_ops: def evaluate(obj): diff --git a/test/orm/test_evaluator.py b/test/orm/test_evaluator.py index 62acca58270..5902264e36e 100644 --- a/test/orm/test_evaluator.py +++ b/test/orm/test_evaluator.py @@ -5,15 +5,19 @@ from sqlalchemy import ForeignKey from sqlalchemy import inspect from sqlalchemy import Integer +from sqlalchemy import JSON from sqlalchemy import not_ from sqlalchemy import or_ from sqlalchemy import String +from sqlalchemy import testing from sqlalchemy import tuple_ from sqlalchemy.orm import evaluator from sqlalchemy.orm import exc as orm_exc from sqlalchemy.orm import relationship from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message +from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import expect_warnings from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ @@ -50,6 +54,7 @@ def define_tables(cls, metadata): Column("id", Integer, primary_key=True), Column("name", String(64)), Column("othername", String(64)), + Column("json", JSON), ) @classmethod @@ -200,6 +205,24 @@ def test_boolean_ops(self): ], ) + @testing.combinations( + lambda User: User.name + "_foo" == "named_foo", + # not implemented in 1.4 + # lambda User: User.name.startswith("nam"), + # lambda User: User.name.endswith("named"), + ) + def test_string_ops(self, expr): + User = self.classes.User + + test_expr = testing.resolve_lambda(expr, User=User) + eval_eq( + test_expr, + testcases=[ + (User(name="named"), True), + (User(name="othername"), False), + ], + ) + def test_in(self): User = self.classes.User @@ -268,6 +291,66 @@ def test_null_propagation(self): ], ) + @testing.combinations( + (lambda User: User.id + 5, "id", 10, 15, None), + ( + lambda User: User.name + " name", + "name", + "some value", + "some value name", + None, + ), + ( + lambda User: User.id + "name", + "id", + 10, + evaluator.UnevaluatableError, 
+ r"Cannot evaluate math operator \"add\" for " + r"datatypes INTEGER, VARCHAR", + ), + ( + lambda User: User.json + 12, + "json", + {"foo": "bar"}, + evaluator.UnevaluatableError, + r"Cannot evaluate math operator \"add\" for " + r"datatypes JSON, INTEGER", + ), + ( + lambda User: User.json - 12, + "json", + {"foo": "bar"}, + evaluator.UnevaluatableError, + r"Cannot evaluate math operator \"sub\" for " + r"datatypes JSON, INTEGER", + ), + ( + lambda User: User.json - "foo", + "json", + {"foo": "bar"}, + evaluator.UnevaluatableError, + r"Cannot evaluate math operator \"sub\" for " + r"datatypes JSON, VARCHAR", + ), + ) + def test_math_op_type_exclusions( + self, expr, attrname, initial_value, expected, message + ): + """test #8507""" + + User = self.classes.User + + expr = testing.resolve_lambda(expr, User=User) + + if expected is evaluator.UnevaluatableError: + with expect_raises_message(evaluator.UnevaluatableError, message): + compiler.process(expr) + else: + obj = User(**{attrname: initial_value}) + + new_value = compiler.process(expr)(obj) + eq_(new_value, expected) + class M2OEvaluateTest(fixtures.DeclarativeMappedTest): @classmethod From f1ff837f84d7dc405ad23ad010912bceae0727bc Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Tue, 13 Sep 2022 21:23:12 +0200 Subject: [PATCH 371/632] Improved usage of ``asyncio.shield()`` Fixes: #8516 Change-Id: Ifd8f5e5f42d9fbcd5b8d00bddc81ff6be690a75e (cherry picked from commit e2e85de93daef31c75d397251cee2fbee7a5de65) --- doc/build/changelog/unreleased_14/8516.rst | 9 +++++++++ lib/sqlalchemy/ext/asyncio/base.py | 6 ++++++ lib/sqlalchemy/ext/asyncio/engine.py | 6 ++++-- lib/sqlalchemy/ext/asyncio/session.py | 7 ++++--- 4 files changed, 23 insertions(+), 5 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8516.rst diff --git a/doc/build/changelog/unreleased_14/8516.rst b/doc/build/changelog/unreleased_14/8516.rst new file mode 100644 index 00000000000..2f83586e2a8 --- /dev/null +++ 
b/doc/build/changelog/unreleased_14/8516.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, asyncio + :tickets: 8516 + + Improved implementation of ``asyncio.shield()`` used in context managers as + added in :ticket:`8145`, such that the "close" operation is enclosed within + an ``asyncio.Task`` which is then strongly referenced as the operation + proceeds. This is per Python documentation indicating that the task is + otherwise not strongly referenced. diff --git a/lib/sqlalchemy/ext/asyncio/base.py b/lib/sqlalchemy/ext/asyncio/base.py index 3f77f55007e..ae100ecf5f0 100644 --- a/lib/sqlalchemy/ext/asyncio/base.py +++ b/lib/sqlalchemy/ext/asyncio/base.py @@ -1,3 +1,9 @@ +# ext/asyncio/base.py +# Copyright (C) 2020-2022 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php import abc import functools import weakref diff --git a/lib/sqlalchemy/ext/asyncio/engine.py b/lib/sqlalchemy/ext/asyncio/engine.py index 4fbe4f7a592..5bfda150704 100644 --- a/lib/sqlalchemy/ext/asyncio/engine.py +++ b/lib/sqlalchemy/ext/asyncio/engine.py @@ -551,7 +551,8 @@ def __await__(self): return self.start().__await__() async def __aexit__(self, type_, value, traceback): - await asyncio.shield(self.close()) + task = asyncio.create_task(self.close()) + await asyncio.shield(task) @util.create_proxy_methods( @@ -606,7 +607,8 @@ async def go(): await self.transaction.__aexit__(type_, value, traceback) await self.conn.close() - await asyncio.shield(go()) + task = asyncio.create_task(go()) + await asyncio.shield(task) def __init__(self, sync_engine): if not sync_engine.dialect.is_async: diff --git a/lib/sqlalchemy/ext/asyncio/session.py b/lib/sqlalchemy/ext/asyncio/session.py index 378cbcbf2f8..7346840579c 100644 --- a/lib/sqlalchemy/ext/asyncio/session.py +++ b/lib/sqlalchemy/ext/asyncio/session.py @@ -4,7 +4,6 @@ # # This module is part of SQLAlchemy and is released under # the 
MIT License: https://www.opensource.org/licenses/mit-license.php - import asyncio from . import engine @@ -628,7 +627,8 @@ async def __aenter__(self): return self async def __aexit__(self, type_, value, traceback): - await asyncio.shield(self.close()) + task = asyncio.create_task(self.close()) + await asyncio.shield(task) def _maker_context_manager(self): # no @contextlib.asynccontextmanager until python3.7, gr @@ -649,7 +649,8 @@ async def go(): await self.trans.__aexit__(type_, value, traceback) await self.async_session.__aexit__(type_, value, traceback) - await asyncio.shield(go()) + task = asyncio.create_task(go()) + await asyncio.shield(task) class AsyncSessionTransaction(ReversibleProxy, StartableContext): From 3bd8a3625f17b124557b0d3346c51b39782a9fb6 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Thu, 15 Sep 2022 23:38:47 +0200 Subject: [PATCH 372/632] Make 652755e7571c8cc2ec4e8beab8ef6b1f180c496b compatible with py36 Change-Id: I8072146e9c4405a96b566392afa8e57661aa2a42 --- lib/sqlalchemy/ext/asyncio/engine.py | 4 ++-- lib/sqlalchemy/ext/asyncio/session.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/sqlalchemy/ext/asyncio/engine.py b/lib/sqlalchemy/ext/asyncio/engine.py index 5bfda150704..ba4dd39b6f2 100644 --- a/lib/sqlalchemy/ext/asyncio/engine.py +++ b/lib/sqlalchemy/ext/asyncio/engine.py @@ -551,7 +551,7 @@ def __await__(self): return self.start().__await__() async def __aexit__(self, type_, value, traceback): - task = asyncio.create_task(self.close()) + task = asyncio.get_running_loop().create_task(self.close()) await asyncio.shield(task) @@ -607,7 +607,7 @@ async def go(): await self.transaction.__aexit__(type_, value, traceback) await self.conn.close() - task = asyncio.create_task(go()) + task = asyncio.get_running_loop().create_task(go()) await asyncio.shield(task) def __init__(self, sync_engine): diff --git a/lib/sqlalchemy/ext/asyncio/session.py b/lib/sqlalchemy/ext/asyncio/session.py index 
7346840579c..61874378dd0 100644 --- a/lib/sqlalchemy/ext/asyncio/session.py +++ b/lib/sqlalchemy/ext/asyncio/session.py @@ -627,7 +627,7 @@ async def __aenter__(self): return self async def __aexit__(self, type_, value, traceback): - task = asyncio.create_task(self.close()) + task = asyncio.get_running_loop().create_task(self.close()) await asyncio.shield(task) def _maker_context_manager(self): @@ -649,7 +649,7 @@ async def go(): await self.trans.__aexit__(type_, value, traceback) await self.async_session.__aexit__(type_, value, traceback) - task = asyncio.create_task(go()) + task = asyncio.get_running_loop().create_task(go()) await asyncio.shield(task) From 271b71e8d07e719529cf931d0f3f9563af70b911 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 15 Sep 2022 18:54:31 -0400 Subject: [PATCH 373/632] use get_event_loop() for python 3.6 per https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.get_running_loop the get_event_loop method will eventually be an alias for get_running_loop. 
the latter is not present in python 3.6 Fixes: #8516 Change-Id: Idc9ba0ca5030e7f5878d31a9ab5b5cc5d40f98b9 --- lib/sqlalchemy/ext/asyncio/engine.py | 4 ++-- lib/sqlalchemy/ext/asyncio/session.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/sqlalchemy/ext/asyncio/engine.py b/lib/sqlalchemy/ext/asyncio/engine.py index ba4dd39b6f2..94e54dc65e8 100644 --- a/lib/sqlalchemy/ext/asyncio/engine.py +++ b/lib/sqlalchemy/ext/asyncio/engine.py @@ -551,7 +551,7 @@ def __await__(self): return self.start().__await__() async def __aexit__(self, type_, value, traceback): - task = asyncio.get_running_loop().create_task(self.close()) + task = asyncio.get_event_loop().create_task(self.close()) await asyncio.shield(task) @@ -607,7 +607,7 @@ async def go(): await self.transaction.__aexit__(type_, value, traceback) await self.conn.close() - task = asyncio.get_running_loop().create_task(go()) + task = asyncio.get_event_loop().create_task(go()) await asyncio.shield(task) def __init__(self, sync_engine): diff --git a/lib/sqlalchemy/ext/asyncio/session.py b/lib/sqlalchemy/ext/asyncio/session.py index 61874378dd0..d167ec0e980 100644 --- a/lib/sqlalchemy/ext/asyncio/session.py +++ b/lib/sqlalchemy/ext/asyncio/session.py @@ -627,7 +627,7 @@ async def __aenter__(self): return self async def __aexit__(self, type_, value, traceback): - task = asyncio.get_running_loop().create_task(self.close()) + task = asyncio.get_event_loop().create_task(self.close()) await asyncio.shield(task) def _maker_context_manager(self): @@ -649,7 +649,7 @@ async def go(): await self.trans.__aexit__(type_, value, traceback) await self.async_session.__aexit__(type_, value, traceback) - task = asyncio.get_running_loop().create_task(go()) + task = asyncio.get_event_loop().create_task(go()) await asyncio.shield(task) From 02b1ed522038cbe90f9548c49ce41c29fc68032c Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Fri, 16 Sep 2022 23:50:47 +0200 Subject: [PATCH 374/632] fix documentation typos 
Closes #8527 Change-Id: I0354f3953075fa35a84b09ad45fd850d8889c992 --- doc/build/changelog/changelog_13.rst | 2 +- doc/build/changelog/changelog_14.rst | 2 +- doc/build/orm/composites.rst | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/build/changelog/changelog_13.rst b/doc/build/changelog/changelog_13.rst index 00c67ea3bc8..629387ff97b 100644 --- a/doc/build/changelog/changelog_13.rst +++ b/doc/build/changelog/changelog_13.rst @@ -2681,7 +2681,7 @@ Fixed bug where the :attr:`_orm.Mapper.all_orm_descriptors` accessor would return an entry for the :class:`_orm.Mapper` itself under the declarative - ``__mapper___`` key, when this is not a descriptor. The ``.is_attribute`` + ``__mapper__`` key, when this is not a descriptor. The ``.is_attribute`` flag that's present on all :class:`.InspectionAttr` objects is now consulted, which has also been modified to be ``True`` for an association proxy, as it was erroneously set to False for this object. diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 060d964a148..5a267ebd0d4 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -3175,7 +3175,7 @@ This document details individual issue-level changes made throughout ``@validates`` validator function or a ``@reconstructor`` reconstruction function, to check for "callable" more liberally such as to accommodate objects based on fundamental attributes like ``__func__`` and - ``__call___``, rather than testing for ``MethodType`` / ``FunctionType``, + ``__call__``, rather than testing for ``MethodType`` / ``FunctionType``, allowing things like cython functions to work properly. Pull request courtesy Miłosz Stypiński. 
diff --git a/doc/build/orm/composites.rst b/doc/build/orm/composites.rst index 69fc93622b2..181993db5c0 100644 --- a/doc/build/orm/composites.rst +++ b/doc/build/orm/composites.rst @@ -144,7 +144,7 @@ the same expression that the base "greater than" does:: class Vertex(Base): - ___tablename__ = "vertices" + __tablename__ = "vertices" id = Column(Integer, primary_key=True) x1 = Column(Integer) From 73156d521d3fac5eb6ade44915d5b7ef8e42b751 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 17 Sep 2022 10:18:56 -0400 Subject: [PATCH 375/632] remove obtuse section about "bundled bind parameters" Just looking for basics on insert in the first pages of the tutorial I see this weird detour into something that nobody ever uses and definitely isn't going to make sense to the people I see complaining about our docs on twitter, remove this. the tutorial probably needs a big sweep for wordy obtuse things. the userbase is changing and we really have a lot of brand-new-to-programming types coming in. Change-Id: I3bb11f0399e55edbb8f874e7eb63c40616b04e8b (cherry picked from commit f0bcd57f9ed76ba8d871448d821a85089f490b6c) --- doc/build/tutorial/dbapi_transactions.rst | 48 ++--------------------- 1 file changed, 3 insertions(+), 45 deletions(-) diff --git a/doc/build/tutorial/dbapi_transactions.rst b/doc/build/tutorial/dbapi_transactions.rst index d7ac0b87c42..6492f5f0ec2 100644 --- a/doc/build/tutorial/dbapi_transactions.rst +++ b/doc/build/tutorial/dbapi_transactions.rst @@ -398,48 +398,6 @@ for this use case. however again when using the ORM, there is a different technique generally used for updating or deleting many individual rows separately. -.. rst-class:: orm-addin - -.. _tutorial_bundling_parameters: - -Bundling Parameters with a Statement -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The two previous cases illustrate a series of parameters being passed to -accompany a SQL statement. 
For single-parameter statement executions, -SQLAlchemy's use of parameters is in fact more often than not done by -**bundling** the parameters with the statement itself, which is a primary -feature of the SQL Expression Language and makes for queries that can be -composed naturally while still making use of parameterization in all cases. -This concept will be discussed in much more detail in the sections that follow; -for a brief preview, the :func:`_sql.text` construct itself being part of the -SQL Expression Language supports this feature by using the -:meth:`_sql.TextClause.bindparams` method; this is a :term:`generative` method that -returns a new copy of the SQL construct with additional state added, in this -case the parameter values we want to pass along: - - -.. sourcecode:: pycon+sql - - >>> stmt = text("SELECT x, y FROM some_table WHERE y > :y ORDER BY x, y").bindparams(y=6) - >>> with engine.connect() as conn: - ... result = conn.execute(stmt) - ... for row in result: - ... print(f"x: {row.x} y: {row.y}") - {opensql}BEGIN (implicit) - SELECT x, y FROM some_table WHERE y > ? ORDER BY x, y - [...] (6,) - {stop}x: 6 y: 8 - x: 9 y: 10 - x: 11 y: 12 - x: 13 y: 14 - {opensql}ROLLBACK{stop} - - -The interesting thing to note above is that even though we passed only a single -argument, ``stmt``, to the :meth:`_future.Connection.execute` method, the -execution of the statement illustrated both the SQL string as well as the -separate parameter tuple. .. rst-class:: orm-addin @@ -474,9 +432,9 @@ a context manager: >>> from sqlalchemy.orm import Session - >>> stmt = text("SELECT x, y FROM some_table WHERE y > :y ORDER BY x, y").bindparams(y=6) + >>> stmt = text("SELECT x, y FROM some_table WHERE y > :y ORDER BY x, y") >>> with Session(engine) as session: - ... result = session.execute(stmt) + ... result = session.execute(stmt, {"y": 6}) ... for row in result: ... 
print(f"x: {row.x} y: {row.y}") {opensql}BEGIN (implicit) @@ -489,7 +447,7 @@ a context manager: {opensql}ROLLBACK{stop} The example above can be compared to the example in the preceding section -in :ref:`tutorial_bundling_parameters` - we directly replace the call to +in :ref:`tutorial_sending_parameters` - we directly replace the call to ``with engine.connect() as conn`` with ``with Session(engine) as session``, and then make use of the :meth:`_orm.Session.execute` method just like we do with the :meth:`_future.Connection.execute` method. From 4428dce5a633fca699b423dcb76672a5f9e4c0d8 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 17 Sep 2022 10:33:55 -0400 Subject: [PATCH 376/632] change verbiage stating exact compliance with RFC-1738 As long as we aren't using urlparse() to parse URLs, we are not RFC-1738 compliant. As we accept underscores in the scheme and not dashes or dots, we are not RFC-1738 compliant, so emulate language like that of PostgreSQL [1] that we "generally follow" this scheme but include some exceptions. 
[1] https://www.postgresql.org/docs/current/libpq-connect.html#id-1.7.3.8.3.6 Fixes: #8519 Change-Id: I2d7e55d9df17aed122cebb2c4c315f56c06a3da5 (cherry picked from commit c88bb2167b1c4b39c7f9378b621bb8d429269d90) --- doc/build/changelog/changelog_02.rst | 6 ++++- doc/build/changelog/changelog_09.rst | 10 ++++---- doc/build/core/engines.rst | 14 ++++++----- lib/sqlalchemy/engine/url.py | 35 +++++++++++++++++----------- 4 files changed, 39 insertions(+), 26 deletions(-) diff --git a/doc/build/changelog/changelog_02.rst b/doc/build/changelog/changelog_02.rst index 69805d60980..3d40a79a32a 100644 --- a/doc/build/changelog/changelog_02.rst +++ b/doc/build/changelog/changelog_02.rst @@ -1057,7 +1057,11 @@ :tickets: create_engine now takes only RFC-1738-style strings: - driver://user:password@host:port/database + ``driver://user:password@host:port/database`` + + **update** this format is generally but not exactly RFC-1738, + including that underscores, not dashes or periods, are accepted in the + "scheme" portion. .. change:: :tags: diff --git a/doc/build/changelog/changelog_09.rst b/doc/build/changelog/changelog_09.rst index acf1ede9232..c9ec5f3a49a 100644 --- a/doc/build/changelog/changelog_09.rst +++ b/doc/build/changelog/changelog_09.rst @@ -2647,11 +2647,11 @@ :tags: bug, engine :tickets: 2873 - The :func:`_sa.create_engine` routine and the related - :func:`.make_url` function no longer considers the ``+`` sign - to be a space within the password field. The parsing has been - adjusted to match RFC 1738 exactly, in that both ``username`` - and ``password`` expect only ``:``, ``@``, and ``/`` to be + The :func:`_sa.create_engine` routine and the related :func:`.make_url` + function no longer considers the ``+`` sign to be a space within the + password field. The parsing in this area has been adjusted to match + more closely to how RFC 1738 handles these tokens, in that both + ``username`` and ``password`` expect only ``:``, ``@``, and ``/`` to be encoded. .. 
seealso:: diff --git a/doc/build/core/engines.rst b/doc/build/core/engines.rst index be14536919a..ffbfc108888 100644 --- a/doc/build/core/engines.rst +++ b/doc/build/core/engines.rst @@ -55,12 +55,14 @@ See the section :ref:`dialect_toplevel` for information on the various backends Database URLs ============= -The :func:`_sa.create_engine` function produces an :class:`_engine.Engine` object based -on a URL. These URLs follow `RFC-1738 -`_, and usually can include username, password, -hostname, database name as well as optional keyword arguments for additional configuration. -In some cases a file path is accepted, and in others a "data source name" replaces -the "host" and "database" portions. The typical form of a database URL is: +The :func:`_sa.create_engine` function produces an :class:`_engine.Engine` +object based on a URL. The format of the URL generally follows `RFC-1738 +`_, with some exceptions, including that +underscores, not dashes or periods, are accepted within the "scheme" portion. +URLs typically include username, password, hostname, database name fields, as +well as optional keyword arguments for additional configuration. In some cases +a file path is accepted, and in others a "data source name" replaces the "host" +and "database" portions. The typical form of a database URL is: .. sourcecode:: none diff --git a/lib/sqlalchemy/engine/url.py b/lib/sqlalchemy/engine/url.py index db971c2ab50..a8138c1b48c 100644 --- a/lib/sqlalchemy/engine/url.py +++ b/lib/sqlalchemy/engine/url.py @@ -43,9 +43,10 @@ class URL( Represent the components of a URL used to connect to a database. This object is suitable to be passed directly to a - :func:`_sa.create_engine` call. The fields of the URL are parsed - from a string by the :func:`.make_url` function. The string - format of the URL is an RFC-1738-style string. + :func:`_sa.create_engine` call. The fields of the URL are parsed from a + string by the :func:`.make_url` function. 
The string format of the URL + generally follows `RFC-1738 `_, with + some exceptions. To create a new :class:`_engine.URL` object, use the :func:`_engine.url.make_url` function. To construct a :class:`_engine.URL` @@ -521,12 +522,12 @@ def render_as_string(self, hide_password=True): """ s = self.drivername + "://" if self.username is not None: - s += _rfc_1738_quote(self.username) + s += _sqla_url_quote(self.username) if self.password is not None: s += ":" + ( "***" if hide_password - else _rfc_1738_quote(str(self.password)) + else _sqla_url_quote(str(self.password)) ) s += "@" if self.host is not None: @@ -717,17 +718,23 @@ def translate_connect_args(self, names=None, **kw): def make_url(name_or_url): """Given a string or unicode instance, produce a new URL instance. - The given string is parsed according to the RFC 1738 spec. If an - existing URL object is passed, just returns the object. + + The format of the URL generally follows `RFC-1738 + `_, with some exceptions, including + that underscores, and not dashes or periods, are accepted within the + "scheme" portion. + + If a :class:`.URL` object is passed, it is returned as is. 
+ """ if isinstance(name_or_url, util.string_types): - return _parse_rfc1738_args(name_or_url) + return _parse_url(name_or_url) else: return name_or_url -def _parse_rfc1738_args(name): +def _parse_url(name): pattern = re.compile( r""" (?P[\w\+]+):// @@ -767,10 +774,10 @@ def _parse_rfc1738_args(name): components["query"] = query if components["username"] is not None: - components["username"] = _rfc_1738_unquote(components["username"]) + components["username"] = _sqla_url_unquote(components["username"]) if components["password"] is not None: - components["password"] = _rfc_1738_unquote(components["password"]) + components["password"] = _sqla_url_unquote(components["password"]) ipv4host = components.pop("ipv4host") ipv6host = components.pop("ipv6host") @@ -784,15 +791,15 @@ def _parse_rfc1738_args(name): else: raise exc.ArgumentError( - "Could not parse rfc1738 URL from string '%s'" % name + "Could not parse SQLAlchemy URL from string '%s'" % name ) -def _rfc_1738_quote(text): +def _sqla_url_quote(text): return re.sub(r"[:@/]", lambda m: "%%%X" % ord(m.group(0)), text) -def _rfc_1738_unquote(text): +def _sqla_url_unquote(text): return util.unquote(text) From bbd90a987eea70c28dd1e2ee7007722b16ec9f74 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 19 Sep 2022 09:40:40 -0400 Subject: [PATCH 377/632] break out text() from TextualSelect for col matching Fixed issue where mixing "*" with additional explicitly-named column expressions within the columns clause of a :func:`_sql.select` construct would cause result-column targeting to sometimes consider the label name or other non-repeated names to be an ambiguous target. 
Fixes: #8536 Change-Id: I3c845eaf571033e54c9208762344f67f4351ac3a (cherry picked from commit 78327d98be9236c61f950526470f29b184dabba6) --- doc/build/changelog/unreleased_14/8536.rst | 8 ++++ lib/sqlalchemy/engine/cursor.py | 24 +++++++++--- lib/sqlalchemy/engine/default.py | 1 + lib/sqlalchemy/sql/compiler.py | 16 +++++++- test/requirements.py | 11 ++++++ test/sql/test_resultset.py | 44 ++++++++++++++++++++++ 6 files changed, 98 insertions(+), 6 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8536.rst diff --git a/doc/build/changelog/unreleased_14/8536.rst b/doc/build/changelog/unreleased_14/8536.rst new file mode 100644 index 00000000000..d7b5283cdea --- /dev/null +++ b/doc/build/changelog/unreleased_14/8536.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, engine + :tickets: 8536 + + Fixed issue where mixing "*" with additional explicitly-named column + expressions within the columns clause of a :func:`_sql.select` construct + would cause result-column targeting to sometimes consider the label name or + other non-repeated names to be an ambiguous target. 
diff --git a/lib/sqlalchemy/engine/cursor.py b/lib/sqlalchemy/engine/cursor.py index 774916d95df..168e08d1114 100644 --- a/lib/sqlalchemy/engine/cursor.py +++ b/lib/sqlalchemy/engine/cursor.py @@ -165,6 +165,7 @@ def __init__(self, parent, cursor_description): result_columns, cols_are_ordered, textual_ordered, + ad_hoc_textual, loose_column_name_matching, ) = context.result_column_struct num_ctx_cols = len(result_columns) @@ -173,6 +174,8 @@ def __init__(self, parent, cursor_description): cols_are_ordered ) = ( num_ctx_cols + ) = ( + ad_hoc_textual ) = loose_column_name_matching = textual_ordered = False # merge cursor.description with the column info @@ -184,6 +187,7 @@ def __init__(self, parent, cursor_description): num_ctx_cols, cols_are_ordered, textual_ordered, + ad_hoc_textual, loose_column_name_matching, ) @@ -214,11 +218,18 @@ def __init__(self, parent, cursor_description): # column keys and other names if num_ctx_cols: - # if by-primary-string dictionary smaller (or bigger?!) than - # number of columns, assume we have dupes, rewrite - # dupe records with "None" for index which results in - # ambiguous column exception when accessed. if len(by_key) != num_ctx_cols: + # if by-primary-string dictionary smaller than + # number of columns, assume we have dupes; (this check + # is also in place if string dictionary is bigger, as + # can occur when '*' was used as one of the compiled columns, + # which may or may not be suggestive of dupes), rewrite + # dupe records with "None" for index which results in + # ambiguous column exception when accessed. + # + # this is considered to be the less common case as it is not + # common to have dupe column keys in a SELECT statement. + # # new in 1.4: get the complete set of all possible keys, # strings, objects, whatever, that are dupes across two # different records, first. 
@@ -291,6 +302,7 @@ def _merge_cursor_description( num_ctx_cols, cols_are_ordered, textual_ordered, + ad_hoc_textual, loose_column_name_matching, ): """Merge a cursor.description with compiled result column information. @@ -386,7 +398,9 @@ def _merge_cursor_description( # name-based or text-positional cases, where we need # to read cursor.description names - if textual_ordered: + if textual_ordered or ( + ad_hoc_textual and len(cursor_description) == num_ctx_cols + ): self._safe_for_cache = True # textual positional case raw_iterator = self._merge_textual_cols_by_position( diff --git a/lib/sqlalchemy/engine/default.py b/lib/sqlalchemy/engine/default.py index 6b58c44696b..e050bea7a7f 100644 --- a/lib/sqlalchemy/engine/default.py +++ b/lib/sqlalchemy/engine/default.py @@ -975,6 +975,7 @@ def _init_compiled( compiled._result_columns, compiled._ordered_columns, compiled._textual_ordered_columns, + compiled._ad_hoc_textual, compiled._loose_column_name_matching, ) self.isinsert = compiled.isinsert diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index c9b6ba670c2..0e441fbec8e 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -611,6 +611,20 @@ class SQLCompiler(Compiled): _textual_ordered_columns = False """tell the result object that the column names as rendered are important, but they are also "ordered" vs. what is in the compiled object here. + + As of 1.4.42 this condition is only present when the statement is a + TextualSelect, e.g. text("....").columns(...), where it is required + that the columns are considered positionally and not by name. + + """ + + _ad_hoc_textual = False + """tell the result that we encountered text() or '*' constructs in the + middle of the result columns, but we also have compiled columns, so + if the number of columns in cursor.description does not match how many + expressions we have, that means we can't rely on positional at all and + should match on name. 
+ """ _ordered_columns = True @@ -3024,7 +3038,7 @@ def get_render_as_alias_suffix(self, alias_name_text): def _add_to_result_map(self, keyname, name, objects, type_): if keyname is None or keyname == "*": self._ordered_columns = False - self._textual_ordered_columns = True + self._ad_hoc_textual = True if type_._is_tuple_type: raise exc.CompileError( "Most backends don't support SELECTing " diff --git a/test/requirements.py b/test/requirements.py index 68e5f8bfe26..ca074c79b26 100644 --- a/test/requirements.py +++ b/test/requirements.py @@ -363,6 +363,17 @@ def cursor_works_post_rollback(self): return skip_if(["+pyodbc"], "no driver support") + @property + def select_star_mixed(self): + r"""target supports expressions like "SELECT x, y, \*, z FROM table" + + apparently MySQL / MariaDB, Oracle doesn't handle this. + + We only need a few backends so just cover SQLite / PG + + """ + return only_on(["sqlite", "postgresql"]) + @property def independent_connections(self): """ diff --git a/test/sql/test_resultset.py b/test/sql/test_resultset.py index 13190f915f9..5d29b0b2b1f 100644 --- a/test/sql/test_resultset.py +++ b/test/sql/test_resultset.py @@ -1020,6 +1020,50 @@ def test_ambiguous_column_contains(self, connection): set([True]), ) + @testing.combinations( + (("name_label", "*"), False), + (("*", "name_label"), False), + (("user_id", "name_label", "user_name"), False), + (("user_id", "name_label", "*", "user_name"), True), + argnames="cols,other_cols_are_ambiguous", + ) + @testing.requires.select_star_mixed + def test_label_against_star( + self, connection, cols, other_cols_are_ambiguous + ): + """test #8536""" + users = self.tables.users + + connection.execute(users.insert(), dict(user_id=1, user_name="john")) + + stmt = select( + *[ + text("*") + if colname == "*" + else users.c.user_name.label("name_label") + if colname == "name_label" + else users.c[colname] + for colname in cols + ] + ) + + row = connection.execute(stmt).first() + + 
eq_(row._mapping["name_label"], "john") + + if other_cols_are_ambiguous: + with expect_raises_message( + exc.InvalidRequestError, "Ambiguous column name" + ): + row._mapping["user_id"] + with expect_raises_message( + exc.InvalidRequestError, "Ambiguous column name" + ): + row._mapping["user_name"] + else: + eq_(row._mapping["user_id"], 1) + eq_(row._mapping["user_name"], "john") + def test_loose_matching_one(self, connection): users = self.tables.users addresses = self.tables.addresses From 4d6d402d73cb64877b21bb3917bda8cf1a23f296 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 23 Sep 2022 15:17:57 -0400 Subject: [PATCH 378/632] remove should_nest behavior for contains_eager() Fixed regression for 1.4 in :func:`_orm.contains_eager` where the "wrap in subquery" logic of :func:`_orm.joinedload` would be inadvertently triggered for use of the :func:`_orm.contains_eager` function with similar statements (e.g. those that use ``distinct()``, ``limit()`` or ``offset()``). This is not appropriate for :func:`_orm.contains_eager` which has always had the contract that the user-defined SQL statement is unmodified with the exception of adding the appropriate columns. Also includes an adjustment to the assertion in Label._make_proxy() which was there to prevent a fixed label name from being anonymized; if the label is already anonymous, the change should proceed. This logic was being hit before the contains_eager behavior was adjusted. With the adjustment, this code is not used. 
Fixes: #8569 Change-Id: I161e65041c0162fd2b83cbef40f57a50fcfaf0fd (cherry picked from commit 57b400f07951f0ae8651ca38338ec5be1d222c7e) --- lib/sqlalchemy/orm/context.py | 8 ++- lib/sqlalchemy/orm/strategies.py | 13 ++++ lib/sqlalchemy/sql/elements.py | 2 +- test/orm/test_core_compilation.py | 103 ++++++++++++++++++++++++++---- test/orm/test_eager_relations.py | 8 ++- test/orm/test_query.py | 18 ++++++ test/sql/test_selectable.py | 22 ++++++- 7 files changed, 157 insertions(+), 17 deletions(-) diff --git a/lib/sqlalchemy/orm/context.py b/lib/sqlalchemy/orm/context.py index d5a742cc578..379b65ac7e9 100644 --- a/lib/sqlalchemy/orm/context.py +++ b/lib/sqlalchemy/orm/context.py @@ -397,6 +397,7 @@ class ORMFromStatementCompileState(ORMCompileState): _has_orm_entities = False multi_row_eager_loaders = False + eager_adding_joins = False compound_eager_adapter = None extra_criteria_entities = _EMPTY_DICT @@ -592,6 +593,7 @@ class ORMSelectCompileState(ORMCompileState, SelectState): _has_orm_entities = False multi_row_eager_loaders = False + eager_adding_joins = False compound_eager_adapter = None correlate = None @@ -900,7 +902,11 @@ def _setup_for_generate(self): if self.order_by is False: self.order_by = None - if self.multi_row_eager_loaders and self._should_nest_selectable: + if ( + self.multi_row_eager_loaders + and self.eager_adding_joins + and self._should_nest_selectable + ): self.statement = self._compound_eager_statement() else: self.statement = self._simple_statement() diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index 944c114a640..a014b2f4115 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -1965,6 +1965,9 @@ def setup_query( ) if user_defined_adapter is not False: + + # setup an adapter but dont create any JOIN, assume it's already + # in the query ( clauses, adapter, @@ -1976,6 +1979,11 @@ def setup_query( adapter, user_defined_adapter, ) + + # don't do "wrap" for multi-row, we want 
to wrap + # limited/distinct SELECT, + # because we want to put the JOIN on the outside. + else: # if not via query option, check for # a cycle @@ -1986,6 +1994,7 @@ def setup_query( elif path.contains_mapper(self.mapper): return + # add the JOIN and create an adapter ( clauses, adapter, @@ -2002,6 +2011,10 @@ def setup_query( chained_from_outerjoin, ) + # for multi-row, we want to wrap limited/distinct SELECT, + # because we want to put the JOIN on the outside. + compile_state.eager_adding_joins = True + with_poly_entity = path.get( compile_state.attributes, "path_with_polymorphic", None ) diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index 268c0d6ac4d..ace43b3a1d4 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -4636,7 +4636,7 @@ def _make_proxy(self, selectable, name=None, **kw): # when a label name conflicts with other columns and select() # is attempting to disambiguate an explicit label, which is not what # the user would want. See issue #6090. 
- if key != self.name: + if key != self.name and not isinstance(self.name, _anonymous_label): raise exc.InvalidRequestError( "Label name %s is being renamed to an anonymous label due " "to disambiguation " diff --git a/test/orm/test_core_compilation.py b/test/orm/test_core_compilation.py index 1457f873c5f..c0c530b4c07 100644 --- a/test/orm/test_core_compilation.py +++ b/test/orm/test_core_compilation.py @@ -2,6 +2,7 @@ from sqlalchemy import Column from sqlalchemy import delete from sqlalchemy import exc +from sqlalchemy import ForeignKey from sqlalchemy import func from sqlalchemy import insert from sqlalchemy import inspect @@ -10,6 +11,7 @@ from sqlalchemy import null from sqlalchemy import or_ from sqlalchemy import select +from sqlalchemy import String from sqlalchemy import testing from sqlalchemy import text from sqlalchemy import union @@ -1023,20 +1025,35 @@ def plain_fixture(self): self.mapper_registry.map_imperatively( User, users, + properties={ + "addresses": relationship(Address, back_populates="user") + }, ) self.mapper_registry.map_imperatively( Address, addresses, properties={ - "user": relationship( - User, - ) + "user": relationship(User, back_populates="addresses") }, ) return User, Address + @testing.fixture + def hard_labeled_self_ref_fixture(self, decl_base): + class A(decl_base): + __tablename__ = "a" + + id = Column(Integer, primary_key=True) + a_id = Column(ForeignKey("a.id")) + data = Column(String) + data_lower = column_property(func.lower(data).label("hardcoded")) + + as_ = relationship("A") + + return A + def test_no_joinedload_embedded(self, plain_fixture): User, Address = plain_fixture @@ -1145,22 +1162,84 @@ def test_joinedload_outermost(self, plain_fixture): "ON users_1.id = addresses.user_id", ) - def test_contains_eager_outermost(self, plain_fixture): + def test_joinedload_outermost_w_wrapping_elements(self, plain_fixture): User, Address = plain_fixture stmt = ( - select(Address) - .join(Address.user) - 
.options(contains_eager(Address.user)) + select(User) + .options(joinedload(User.addresses)) + .limit(10) + .distinct() ) - # render joined eager loads with stringify self.assert_compile( stmt, - "SELECT users.id, users.name, addresses.id AS id_1, " - "addresses.user_id, " - "addresses.email_address " - "FROM addresses JOIN users ON users.id = addresses.user_id", + "SELECT anon_1.id, anon_1.name, addresses_1.id AS id_1, " + "addresses_1.user_id, addresses_1.email_address FROM " + "(SELECT DISTINCT users.id AS id, users.name AS name FROM users " + "LIMIT :param_1) " + "AS anon_1 LEFT OUTER JOIN addresses AS addresses_1 " + "ON anon_1.id = addresses_1.user_id", + ) + + def test_contains_eager_outermost_w_wrapping_elements(self, plain_fixture): + """test #8569""" + + User, Address = plain_fixture + + stmt = ( + select(User) + .join(User.addresses) + .options(contains_eager(User.addresses)) + .limit(10) + .distinct() + ) + + self.assert_compile( + stmt, + "SELECT DISTINCT addresses.id, addresses.user_id, " + "addresses.email_address, users.id AS id_1, users.name " + "FROM users JOIN addresses ON users.id = addresses.user_id " + "LIMIT :param_1", + ) + + def test_joinedload_hard_labeled_selfref( + self, hard_labeled_self_ref_fixture + ): + """test #8569""" + + A = hard_labeled_self_ref_fixture + + stmt = select(A).options(joinedload(A.as_)).distinct() + self.assert_compile( + stmt, + "SELECT anon_1.hardcoded, anon_1.id, anon_1.a_id, anon_1.data, " + "lower(a_1.data) AS lower_1, a_1.id AS id_1, a_1.a_id AS a_id_1, " + "a_1.data AS data_1 FROM (SELECT DISTINCT lower(a.data) AS " + "hardcoded, a.id AS id, a.a_id AS a_id, a.data AS data FROM a) " + "AS anon_1 LEFT OUTER JOIN a AS a_1 ON anon_1.id = a_1.a_id", + ) + + def test_contains_eager_hard_labeled_selfref( + self, hard_labeled_self_ref_fixture + ): + """test #8569""" + + A = hard_labeled_self_ref_fixture + + a1 = aliased(A) + stmt = ( + select(A) + .join(A.as_.of_type(a1)) + .options(contains_eager(A.as_.of_type(a1))) 
+ .distinct() + ) + self.assert_compile( + stmt, + "SELECT DISTINCT lower(a.data) AS hardcoded, " + "lower(a_1.data) AS hardcoded, a_1.id, a_1.a_id, a_1.data, " + "a.id AS id_1, a.a_id AS a_id_1, a.data AS data_1 " + "FROM a JOIN a AS a_1 ON a.id = a_1.a_id", ) def test_column_properties(self, column_property_fixture): diff --git a/test/orm/test_eager_relations.py b/test/orm/test_eager_relations.py index b2a5ed33f39..fb7550e0ea3 100644 --- a/test/orm/test_eager_relations.py +++ b/test/orm/test_eager_relations.py @@ -3135,10 +3135,14 @@ def test_many_to_one(self): eq_(result.scalars().all(), self.static.address_user_result) - def test_unique_error(self): + @testing.combinations(joinedload, contains_eager) + def test_unique_error(self, opt): User = self.classes.User - stmt = select(User).options(joinedload(User.addresses)) + stmt = select(User).options(opt(User.addresses)) + if opt is contains_eager: + stmt = stmt.join(User.addresses) + s = fixture_session() result = s.execute(stmt) diff --git a/test/orm/test_query.py b/test/orm/test_query.py index ddaa3c60dab..9779462a246 100644 --- a/test/orm/test_query.py +++ b/test/orm/test_query.py @@ -5463,6 +5463,24 @@ def test_no_joinedload_opt(self): q.all, ) + def test_no_contains_eager_opt(self): + self._eagerload_mappings() + + User = self.classes.User + sess = fixture_session() + q = ( + sess.query(User) + .join(User.addresses) + .options(contains_eager(User.addresses)) + .yield_per(1) + ) + assert_raises_message( + sa_exc.InvalidRequestError, + "Can't use yield_per with eager loaders that require " + "uniquing or row buffering", + q.all, + ) + def test_no_subqueryload_opt(self): self._eagerload_mappings() diff --git a/test/sql/test_selectable.py b/test/sql/test_selectable.py index e0113a7f101..a3f7b7c4682 100644 --- a/test/sql/test_selectable.py +++ b/test/sql/test_selectable.py @@ -384,8 +384,10 @@ def test_labels_anon_generate_binds_subquery(self): @testing.combinations((True,), (False,)) def 
test_broken_select_same_named_explicit_cols(self, use_anon): - # this is issue #6090. the query is "wrong" and we dont know how + """test for #6090. the query is "wrong" and we dont know how # to render this right now. + + """ stmt = select( table1.c.col1, table1.c.col2, @@ -412,6 +414,24 @@ def test_broken_select_same_named_explicit_cols(self, use_anon): ): select(stmt.subquery()).compile() + def test_same_anon_named_explicit_cols(self): + """test for #8569. This adjusts the change in #6090 to not apply + to anonymous labels. + + """ + lc = literal_column("col2").label(None) + + subq1 = select(lc).subquery() + + stmt2 = select(subq1, lc).subquery() + + self.assert_compile( + select(stmt2), + "SELECT anon_1.col2_1, anon_1.col2_1_1 FROM " + "(SELECT anon_2.col2_1 AS col2_1, col2 AS col2_1 FROM " + "(SELECT col2 AS col2_1) AS anon_2) AS anon_1", + ) + def test_select_label_grouped_still_corresponds(self): label = select(table1.c.col1).label("foo") label2 = label.self_group() From b9e2926d661b80ee7b7c094c92cfa272d81b0b8b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 24 Sep 2022 11:16:16 -0400 Subject: [PATCH 379/632] add missing changelog for #8569 Missed this in 57b400f07951f0ae8651ca383. have no idea how. Fixes: #8569 Change-Id: I4cec98d8c963930ef822bfd53d8a60a20be02894 (cherry picked from commit 75ab50869b37368f32ec311dfb59777c0c1d1edb) --- doc/build/changelog/unreleased_14/8569.rst | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/8569.rst diff --git a/doc/build/changelog/unreleased_14/8569.rst b/doc/build/changelog/unreleased_14/8569.rst new file mode 100644 index 00000000000..fc3b3f73986 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8569.rst @@ -0,0 +1,13 @@ +.. 
change:: + :tags: orm, bug, regression + :tickets: 8569 + + Fixed regression for 1.4 in :func:`_orm.contains_eager` where the "wrap in + subquery" logic of :func:`_orm.joinedload` would be inadvertently triggered + for use of the :func:`_orm.contains_eager` function with similar statements + (e.g. those that use ``distinct()``, ``limit()`` or ``offset()``), which + would then lead to secondary issues with queries that used some + combinations of SQL label names and aliasing. This "wrapping" is not + appropriate for :func:`_orm.contains_eager` which has always had the + contract that the user-defined SQL statement is unmodified with the + exception of adding the appropriate columns to be fetched. \ No newline at end of file From e05a8464c120fc7d2e3776b5a70fefbbe48be81c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 25 Sep 2022 14:56:22 -0400 Subject: [PATCH 380/632] warn for local-only column in remote side A warning is emitted in ORM configurations when an explicit :func:`_orm.remote` annotation is applied to columns that are local to the immediate mapped class, when the referenced class does not include any of the same table columns. Ideally this would raise an error at some point as it's not correct from a mapping point of view. Fixes: #7094 Fixes: #8575 Change-Id: Ia31be24aebe143161e19dc311b52c08fd5014d33 (cherry picked from commit 29838ef584d49e5ecca08f76e4966454dc7f060f) --- doc/build/changelog/unreleased_14/7094.rst | 9 ++++ lib/sqlalchemy/orm/relationships.py | 16 +++++++ test/orm/test_cycles.py | 3 -- test/orm/test_relationships.py | 49 ++++++++++++++++++++++ 4 files changed, 74 insertions(+), 3 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7094.rst diff --git a/doc/build/changelog/unreleased_14/7094.rst b/doc/build/changelog/unreleased_14/7094.rst new file mode 100644 index 00000000000..b6fb30d9989 --- /dev/null +++ b/doc/build/changelog/unreleased_14/7094.rst @@ -0,0 +1,9 @@ +.. 
change:: + :tags: bug, orm + :tickets: 7094 + + A warning is emitted in ORM configurations when an explicit + :func:`_orm.remote` annotation is applied to columns that are local to the + immediate mapped class, when the referenced class does not include any of + the same table columns. Ideally this would raise an error at some point as + it's not correct from a mapping point of view. diff --git a/lib/sqlalchemy/orm/relationships.py b/lib/sqlalchemy/orm/relationships.py index b51ea0e0097..9a6cfb68cc5 100644 --- a/lib/sqlalchemy/orm/relationships.py +++ b/lib/sqlalchemy/orm/relationships.py @@ -3195,6 +3195,22 @@ def _check_remote_side(self): "condition that are on the remote side of " "the relationship." % (self.prop,) ) + else: + + not_target = util.column_set( + self.parent_persist_selectable.c + ).difference(self.child_persist_selectable.c) + + for _, rmt in self.local_remote_pairs: + if rmt in not_target: + util.warn( + "Expression %s is marked as 'remote', but these " + "column(s) are local to the local side. The " + "remote() annotation is needed only for a " + "self-referential relationship where both sides " + "of the relationship refer to the same tables." 
+ % (rmt,) + ) def _check_foreign_cols(self, join_condition, primary): """Check the foreign key columns collected and emit error diff --git a/test/orm/test_cycles.py b/test/orm/test_cycles.py index 9d0369191e0..1d749cac975 100644 --- a/test/orm/test_cycles.py +++ b/test/orm/test_cycles.py @@ -918,7 +918,6 @@ def test_post_update_m2o(self): favorite=relationship( Ball, primaryjoin=person.c.favorite_ball_id == ball.c.id, - remote_side=person.c.favorite_ball_id, post_update=True, _legacy_inactive_history_style=( self._legacy_inactive_history_style @@ -1036,7 +1035,6 @@ def test_post_update_backref(self): favorite=relationship( Ball, primaryjoin=person.c.favorite_ball_id == ball.c.id, - remote_side=person.c.favorite_ball_id, _legacy_inactive_history_style=( self._legacy_inactive_history_style ), @@ -1096,7 +1094,6 @@ def test_post_update_o2m(self): favorite=relationship( Ball, primaryjoin=person.c.favorite_ball_id == ball.c.id, - remote_side=person.c.favorite_ball_id, _legacy_inactive_history_style=( self._legacy_inactive_history_style ), diff --git a/test/orm/test_relationships.py b/test/orm/test_relationships.py index 1dc5b37fd2d..e98068eddb6 100644 --- a/test/orm/test_relationships.py +++ b/test/orm/test_relationships.py @@ -2588,6 +2588,55 @@ class C2(object): registry.map_imperatively(C2, t3) assert C1.c2.property.primaryjoin.compare(t1.c.id == t3.c.t1id) + @testing.combinations( + "annotation", "local_remote", argnames="remote_anno_type" + ) + @testing.combinations("orm_col", "core_col", argnames="use_col_from") + def test_no_remote_on_local_only_cols( + self, decl_base, remote_anno_type, use_col_from + ): + """test #7094. 
+ + a warning should be emitted for an inappropriate remote_side argument + + """ + + class A(decl_base): + __tablename__ = "a" + + id = Column(Integer, primary_key=True) + data = Column(String) + + if remote_anno_type == "annotation": + if use_col_from == "core_col": + bs = relationship( + "B", + primaryjoin=lambda: remote(A.__table__.c.id) + == B.__table__.c.a_id, + ) + elif use_col_from == "orm_col": + bs = relationship( + "B", primaryjoin="remote(A.id) == B.a_id" + ) + elif remote_anno_type == "local_remote": + if use_col_from == "core_col": + bs = relationship( + "B", remote_side=lambda: A.__table__.c.id + ) + elif use_col_from == "orm_col": + bs = relationship("B", remote_side="A.id") + + class B(decl_base): + __tablename__ = "b" + id = Column(Integer, primary_key=True) + a_id = Column(ForeignKey("a.id")) + + with expect_warnings( + r"Expression a.id is marked as 'remote', but these column\(s\) " + r"are local to the local side. " + ): + decl_base.registry.configure() + def test_join_error_raised(self, registry): m = MetaData() t1 = Table("t1", m, Column("id", Integer, primary_key=True)) From e3a71aadd7637824e5a6937118668f304460d003 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Sun, 25 Sep 2022 16:37:15 +0200 Subject: [PATCH 381/632] `aggregate_order_by` now supports cache generation. also adjusted CacheKeyFixture to be a general purpose fixture so that sub-components / dialects can run their own cache key tests. 
Fixes: #8574 Change-Id: I6c66107856aee11e548d357cea77bceee3e316a0 (cherry picked from commit 7980b677085fc759a0406f6778b9729955f3c7f6) --- doc/build/changelog/unreleased_14/8574.rst | 5 + lib/sqlalchemy/dialects/postgresql/ext.py | 7 +- lib/sqlalchemy/testing/fixtures.py | 108 +++++++++++++++++++++ test/dialect/postgresql/test_compiler.py | 33 +++++++ test/orm/test_cache_key.py | 5 +- test/orm/test_deprecations.py | 2 +- test/sql/test_compare.py | 105 +------------------- 7 files changed, 156 insertions(+), 109 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8574.rst diff --git a/doc/build/changelog/unreleased_14/8574.rst b/doc/build/changelog/unreleased_14/8574.rst new file mode 100644 index 00000000000..ffc1761c301 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8574.rst @@ -0,0 +1,5 @@ +.. change:: + :tags: usecase, postgresql + :tickets: 8574 + + :class:`_postgresql.aggregate_order_by` now supports cache generation. diff --git a/lib/sqlalchemy/dialects/postgresql/ext.py b/lib/sqlalchemy/dialects/postgresql/ext.py index 9e52ee1ee9f..e6b992e88a9 100644 --- a/lib/sqlalchemy/dialects/postgresql/ext.py +++ b/lib/sqlalchemy/dialects/postgresql/ext.py @@ -14,6 +14,7 @@ from ...sql import roles from ...sql import schema from ...sql.schema import ColumnCollectionConstraint +from ...sql.visitors import InternalTraversal class aggregate_order_by(expression.ColumnElement): @@ -54,7 +55,11 @@ class aggregate_order_by(expression.ColumnElement): __visit_name__ = "aggregate_order_by" stringify_dialect = "postgresql" - inherit_cache = False + _traverse_internals = [ + ("target", InternalTraversal.dp_clauseelement), + ("type", InternalTraversal.dp_type), + ("order_by", InternalTraversal.dp_clauseelement), + ] def __init__(self, target, *order_by): self.target = coercions.expect(roles.ExpressionElementRole, target) diff --git a/lib/sqlalchemy/testing/fixtures.py b/lib/sqlalchemy/testing/fixtures.py index 0a2d63b5480..999647b5b19 100644 --- 
a/lib/sqlalchemy/testing/fixtures.py +++ b/lib/sqlalchemy/testing/fixtures.py @@ -6,6 +6,7 @@ # the MIT License: https://www.opensource.org/licenses/mit-license.php import contextlib +import itertools import re import sys @@ -13,6 +14,8 @@ from . import assertions from . import config from . import schema +from .assertions import eq_ +from .assertions import ne_ from .entities import BasicEntity from .entities import ComparableEntity from .entities import ComparableMixin # noqa @@ -24,6 +27,8 @@ from ..orm import registry from ..orm.decl_api import DeclarativeMeta from ..schema import sort_tables_and_constraints +from ..sql import visitors +from ..sql.elements import ClauseElement @config.mark_base_test_class() @@ -868,3 +873,106 @@ def define_tables(cls, metadata): Computed("normal * 42", persisted=True), ) ) + + +class CacheKeyFixture(object): + def _compare_equal(self, a, b, compare_values): + a_key = a._generate_cache_key() + b_key = b._generate_cache_key() + + if a_key is None: + assert a._annotations.get("nocache") + + assert b_key is None + else: + + eq_(a_key.key, b_key.key) + eq_(hash(a_key.key), hash(b_key.key)) + + for a_param, b_param in zip(a_key.bindparams, b_key.bindparams): + assert a_param.compare(b_param, compare_values=compare_values) + return a_key, b_key + + def _run_cache_key_fixture(self, fixture, compare_values): + case_a = fixture() + case_b = fixture() + + for a, b in itertools.combinations_with_replacement( + range(len(case_a)), 2 + ): + if a == b: + a_key, b_key = self._compare_equal( + case_a[a], case_b[b], compare_values + ) + if a_key is None: + continue + else: + a_key = case_a[a]._generate_cache_key() + b_key = case_b[b]._generate_cache_key() + + if a_key is None or b_key is None: + if a_key is None: + assert case_a[a]._annotations.get("nocache") + if b_key is None: + assert case_b[b]._annotations.get("nocache") + continue + + if a_key.key == b_key.key: + for a_param, b_param in zip( + a_key.bindparams, b_key.bindparams + ): + if 
not a_param.compare( + b_param, compare_values=compare_values + ): + break + else: + # this fails unconditionally since we could not + # find bound parameter values that differed. + # Usually we intended to get two distinct keys here + # so the failure will be more descriptive using the + # ne_() assertion. + ne_(a_key.key, b_key.key) + else: + ne_(a_key.key, b_key.key) + + # ClauseElement-specific test to ensure the cache key + # collected all the bound parameters that aren't marked + # as "literal execute" + if isinstance(case_a[a], ClauseElement) and isinstance( + case_b[b], ClauseElement + ): + assert_a_params = [] + assert_b_params = [] + + for elem in visitors.iterate(case_a[a]): + if elem.__visit_name__ == "bindparam": + assert_a_params.append(elem) + + for elem in visitors.iterate(case_b[b]): + if elem.__visit_name__ == "bindparam": + assert_b_params.append(elem) + + # note we're asserting the order of the params as well as + # if there are dupes or not. ordering has to be + # deterministic and matches what a traversal would provide. 
+ eq_( + sorted(a_key.bindparams, key=lambda b: b.key), + sorted( + util.unique_list(assert_a_params), key=lambda b: b.key + ), + ) + eq_( + sorted(b_key.bindparams, key=lambda b: b.key), + sorted( + util.unique_list(assert_b_params), key=lambda b: b.key + ), + ) + + def _run_cache_key_equal_fixture(self, fixture, compare_values): + case_a = fixture() + case_b = fixture() + + for a, b in itertools.combinations_with_replacement( + range(len(case_a)), 2 + ): + self._compare_equal(case_a[a], case_b[b], compare_values) diff --git a/test/dialect/postgresql/test_compiler.py b/test/dialect/postgresql/test_compiler.py index d85ae9152fd..897909b158b 100644 --- a/test/dialect/postgresql/test_compiler.py +++ b/test/dialect/postgresql/test_compiler.py @@ -3347,3 +3347,36 @@ def test_fetch(self, fetch, offset, fetch_kw, exp, params): "SELECT 1 " + exp, checkparams=params, ) + + +class CacheKeyTest(fixtures.CacheKeyFixture, fixtures.TestBase): + def test_aggregate_order_by(self): + """test #8574""" + + self._run_cache_key_fixture( + lambda: ( + aggregate_order_by(column("a"), column("a")), + aggregate_order_by(column("a"), column("b")), + aggregate_order_by(column("a"), column("a").desc()), + aggregate_order_by(column("a"), column("a").nulls_first()), + aggregate_order_by( + column("a"), column("a").desc().nulls_first() + ), + aggregate_order_by(column("a", Integer), column("b")), + aggregate_order_by(column("a"), column("b"), column("c")), + aggregate_order_by(column("a"), column("c"), column("b")), + aggregate_order_by( + column("a"), column("b").desc(), column("c") + ), + aggregate_order_by( + column("a"), column("b").nulls_first(), column("c") + ), + aggregate_order_by( + column("a"), column("b").desc().nulls_first(), column("c") + ), + aggregate_order_by( + column("a", Integer), column("a"), column("b") + ), + ), + compare_values=False, + ) diff --git a/test/orm/test_cache_key.py b/test/orm/test_cache_key.py index daf963952c8..23fec61d2a0 100644 --- 
a/test/orm/test_cache_key.py +++ b/test/orm/test_cache_key.py @@ -42,7 +42,6 @@ from test.orm import _fixtures from .inheritance import _poly_fixtures from .test_query import QueryTest -from ..sql.test_compare import CacheKeyFixture def stmt_20(*elements): @@ -52,7 +51,7 @@ def stmt_20(*elements): ) -class CacheKeyTest(CacheKeyFixture, _fixtures.FixtureTest): +class CacheKeyTest(fixtures.CacheKeyFixture, _fixtures.FixtureTest): run_setup_mappers = "once" run_inserts = None run_deletes = None @@ -586,7 +585,7 @@ class MyOpt(CacheableOptions): ) -class PolyCacheKeyTest(CacheKeyFixture, _poly_fixtures._Polymorphic): +class PolyCacheKeyTest(fixtures.CacheKeyFixture, _poly_fixtures._Polymorphic): run_setup_mappers = "once" run_inserts = None run_deletes = None diff --git a/test/orm/test_deprecations.py b/test/orm/test_deprecations.py index 8febf3b3fcf..bbfcd0cfd39 100644 --- a/test/orm/test_deprecations.py +++ b/test/orm/test_deprecations.py @@ -77,6 +77,7 @@ from sqlalchemy.testing import is_true from sqlalchemy.testing import mock from sqlalchemy.testing.assertsql import CompiledSQL +from sqlalchemy.testing.fixtures import CacheKeyFixture from sqlalchemy.testing.fixtures import ComparableEntity from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.mock import call @@ -105,7 +106,6 @@ from .test_options import QueryTest as OptionsQueryTest from .test_query import QueryTest from .test_transaction import _LocalFixture -from ..sql.test_compare import CacheKeyFixture join_aliased_dep = ( diff --git a/test/sql/test_compare.py b/test/sql/test_compare.py index f73e9864d37..6cee271c9c1 100644 --- a/test/sql/test_compare.py +++ b/test/sql/test_compare.py @@ -1053,110 +1053,7 @@ def eight(): ] -class CacheKeyFixture(object): - def _compare_equal(self, a, b, compare_values): - a_key = a._generate_cache_key() - b_key = b._generate_cache_key() - - if a_key is None: - assert a._annotations.get("nocache") - - assert b_key is None - else: - - eq_(a_key.key, 
b_key.key) - eq_(hash(a_key.key), hash(b_key.key)) - - for a_param, b_param in zip(a_key.bindparams, b_key.bindparams): - assert a_param.compare(b_param, compare_values=compare_values) - return a_key, b_key - - def _run_cache_key_fixture(self, fixture, compare_values): - case_a = fixture() - case_b = fixture() - - for a, b in itertools.combinations_with_replacement( - range(len(case_a)), 2 - ): - if a == b: - a_key, b_key = self._compare_equal( - case_a[a], case_b[b], compare_values - ) - if a_key is None: - continue - else: - a_key = case_a[a]._generate_cache_key() - b_key = case_b[b]._generate_cache_key() - - if a_key is None or b_key is None: - if a_key is None: - assert case_a[a]._annotations.get("nocache") - if b_key is None: - assert case_b[b]._annotations.get("nocache") - continue - - if a_key.key == b_key.key: - for a_param, b_param in zip( - a_key.bindparams, b_key.bindparams - ): - if not a_param.compare( - b_param, compare_values=compare_values - ): - break - else: - # this fails unconditionally since we could not - # find bound parameter values that differed. - # Usually we intended to get two distinct keys here - # so the failure will be more descriptive using the - # ne_() assertion. - ne_(a_key.key, b_key.key) - else: - ne_(a_key.key, b_key.key) - - # ClauseElement-specific test to ensure the cache key - # collected all the bound parameters that aren't marked - # as "literal execute" - if isinstance(case_a[a], ClauseElement) and isinstance( - case_b[b], ClauseElement - ): - assert_a_params = [] - assert_b_params = [] - - for elem in visitors.iterate(case_a[a]): - if elem.__visit_name__ == "bindparam": - assert_a_params.append(elem) - - for elem in visitors.iterate(case_b[b]): - if elem.__visit_name__ == "bindparam": - assert_b_params.append(elem) - - # note we're asserting the order of the params as well as - # if there are dupes or not. ordering has to be - # deterministic and matches what a traversal would provide. 
- eq_( - sorted(a_key.bindparams, key=lambda b: b.key), - sorted( - util.unique_list(assert_a_params), key=lambda b: b.key - ), - ) - eq_( - sorted(b_key.bindparams, key=lambda b: b.key), - sorted( - util.unique_list(assert_b_params), key=lambda b: b.key - ), - ) - - def _run_cache_key_equal_fixture(self, fixture, compare_values): - case_a = fixture() - case_b = fixture() - - for a, b in itertools.combinations_with_replacement( - range(len(case_a)), 2 - ): - self._compare_equal(case_a[a], case_b[b], compare_values) - - -class CacheKeyTest(CacheKeyFixture, CoreFixtures, fixtures.TestBase): +class CacheKeyTest(fixtures.CacheKeyFixture, CoreFixtures, fixtures.TestBase): # we are slightly breaking the policy of not having external dialect # stuff in here, but use pg/mysql as test cases to ensure that these # objects don't report an inaccurate cache key, which is dependent From f5ff7163211055fc2acd1ef59caa81693de9e113 Mon Sep 17 00:00:00 2001 From: Gord Thompson Date: Wed, 29 Jun 2022 07:38:38 -0600 Subject: [PATCH 382/632] Document user-defined functions for sqlite Change-Id: I64e4d4dce8c5f5aced3190f9e3682c630462a61e (cherry picked from commit 48a0df55c1cfb8746eec8073c0feb05be1652665) --- lib/sqlalchemy/dialects/sqlite/aiosqlite.py | 8 +++++ lib/sqlalchemy/dialects/sqlite/pysqlite.py | 33 +++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/lib/sqlalchemy/dialects/sqlite/aiosqlite.py b/lib/sqlalchemy/dialects/sqlite/aiosqlite.py index 9fc6d355ca8..04adabfb6a1 100644 --- a/lib/sqlalchemy/dialects/sqlite/aiosqlite.py +++ b/lib/sqlalchemy/dialects/sqlite/aiosqlite.py @@ -34,6 +34,14 @@ The URL passes through all arguments to the ``pysqlite`` driver, so all connection arguments are the same as they are for that of :ref:`pysqlite`. +.. 
_aiosqlite_udfs: + +User-Defined Functions +---------------------- + +aiosqlite extends pysqlite to support async, so we can create our own user-defined functions (UDFs) +in Python and use them directly in SQLite queries as described here: :ref:`pysqlite_udfs`. + """ # noqa diff --git a/lib/sqlalchemy/dialects/sqlite/pysqlite.py b/lib/sqlalchemy/dialects/sqlite/pysqlite.py index 1aae5610dfc..d9fa9413e78 100644 --- a/lib/sqlalchemy/dialects/sqlite/pysqlite.py +++ b/lib/sqlalchemy/dialects/sqlite/pysqlite.py @@ -399,6 +399,39 @@ def do_begin(conn): `sqlite3 module breaks transactions and potentially corrupts data `_ - on the Python bug tracker +.. _pysqlite_udfs: + +User-Defined Functions +---------------------- + +pysqlite supports a `create_function() `_ +method that allows us to create our own user-defined functions (UDFs) in Python and use them directly in SQLite queries. +These functions are registered with a specific DBAPI Connection. + +SQLAlchemy uses connection pooling with file-based SQLite databases, so we need to ensure that the UDF is attached to the +connection when it is created. 
That is accomplished with an event listener:: + + from sqlalchemy import create_engine + from sqlalchemy import event + from sqlalchemy import text + + + def udf(): + return "udf-ok" + + + engine = create_engine("sqlite:///./db_file") + + + @event.listens_for(engine, "connect") + def connect(conn, rec): + conn.create_function("udf", 0, udf) + + + for i in range(5): + with engine.connect() as conn: + print(conn.scalar(text("SELECT UDF()"))) + """ # noqa From 709dd763505bdc37e44968dc41b3849c7173ec4f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 29 Sep 2022 17:32:54 -0400 Subject: [PATCH 383/632] rewrite the first section of ORM quickstart Adapted from 1158cf3a872e22e3136cccb05e975d Change-Id: Ic124321b6d6fb50827be26d1917ce158683ce82f --- doc/build/orm/quickstart.rst | 55 +++++++++++++++++++++++++++--------- 1 file changed, 42 insertions(+), 13 deletions(-) diff --git a/doc/build/orm/quickstart.rst b/doc/build/orm/quickstart.rst index 95cace9183a..f15fa4a6c7c 100644 --- a/doc/build/orm/quickstart.rst +++ b/doc/build/orm/quickstart.rst @@ -59,19 +59,48 @@ real SQL tables that exist, or will exist, in a particular database:: ... def __repr__(self): ... return f"Address(id={self.id!r}, email_address={self.email_address!r})" -Above, the declarative mapping makes use of :class:`_schema.Column` objects -to define the basic units of data storage that will be in the database. -The :func:`_orm.relationship` construct defines linkages between two -:term:`mapped` classes, ``User`` and ``Address`` above. - -The schema contains necessary elements such as primary key constraints set up -by the :paramref:`_schema.Column.primary_key` parameter, a -:term:`foreign key constraint` configured using :class:`_schema.ForeignKey` -(which is used by :func:`_orm.relationship` as well), and datatypes for columns -including :class:`_types.Integer` and :class:`_types.String`. 
- -More on table metadata and an intro to ORM declared mapping is in the -Tutorial at :ref:`tutorial_working_with_metadata`. + +The mapping starts with a base class, which above is called ``Base``, and is +created by calling upon the :func:`_orm.declarative_base` function, which +produces a new base class. + +Individual mapped classes are then created by making subclasses of ``Base``. +A mapped class typically refers to a single particular database table, +the name of which is indicated by using the ``__tablename__`` class-level +attribute. + +Next, columns that are part of the table are declared, by adding attributes +linked to the :class:`_schema.Column` construct. :class:`_schema.Column` +describes all aspects of a database column, including typing +information with type objects such as :class:`.Integer` and :class:`.String` +as well as server defaults and +constraint information, such as membership within the primary key and foreign +keys. + +All ORM mapped classes require at least one column be declared as part of the +primary key, typically by using the :paramref:`_schema.Column.primary_key` +parameter on those :class:`_schema.Column` objects that should be part +of the key. In the above example, the ``User.id`` and ``Address.id`` +columns are marked as primary key. + +Taken together, the combination of a string table name as well as a list +of column declarations is referred towards in SQLAlchemy as :term:`table metadata`. +Setting up table metadata using both Core and ORM approaches is introduced +in the :ref:`unified_tutorial` at :ref:`tutorial_working_with_metadata`. +The above mapping is an example of what's referred towards as +:ref:`Declarative Table ` +configuration. + +Other Declarative directives are available, most commonly +the :func:`_orm.relationship` construct indicated above. In contrast +to the column-based attributes, :func:`_orm.relationship` denotes a linkage +between two ORM classes. 
In the above example, ``User.addresses`` links +``User`` to ``Address``, and ``Address.user`` links ``Address`` to ``User``. +The :func:`_orm.relationship` construct is introduced in the +:ref:`unified_tutorial` at :ref:`tutorial_orm_related_objects`. + +Finally, the above example classes include a ``__repr__()`` method, which is +not required but is useful for debugging. Create an Engine ------------------ From 74128a2abdded702ac1ebb700a8a6e554fd57a19 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 30 Sep 2022 13:07:04 -0400 Subject: [PATCH 384/632] add autobuild; improve a few session docs this is from the writeonly patch, some doc edits became more general so will backport these to 1.4. Change-Id: I19231e4bcfa33a0742c8995b6059c9a9488b1a6f (cherry picked from commit abcd088551fda5490ad56c401a8e8260fae0dcfd) --- doc/build/Makefile | 7 +++- doc/build/glossary.rst | 23 +++++++++---- doc/build/orm/session_basics.rst | 2 ++ lib/sqlalchemy/orm/session.py | 55 ++++++++++++++++++++++++++++---- 4 files changed, 73 insertions(+), 14 deletions(-) diff --git a/doc/build/Makefile b/doc/build/Makefile index 09d8b29da15..e9684a20738 100644 --- a/doc/build/Makefile +++ b/doc/build/Makefile @@ -4,6 +4,7 @@ # You can set these variables from the command line. SPHINXOPTS = -T -j auto SPHINXBUILD = sphinx-build +AUTOBUILD = sphinx-autobuild --port 8080 --watch ../../lib PAPER = BUILDDIR = output @@ -14,11 +15,12 @@ ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
-.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest dist-html site-mako gettext +.PHONY: help clean html autobuild dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest dist-html site-mako gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" + @echo " autobuild autobuild and run a webserver" @echo " gettext to make PO message catalogs" @echo " dist-html same as html, but places files in /doc" @echo " dirhtml to make HTML files named index.html in directories" @@ -45,6 +47,9 @@ html: @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." +autobuild: + $(AUTOBUILD) $(ALLSPHINXOPTS) $(BUILDDIR)/html + gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo diff --git a/doc/build/glossary.rst b/doc/build/glossary.rst index 9c8f01d02ef..111adb13b9d 100644 --- a/doc/build/glossary.rst +++ b/doc/build/glossary.rst @@ -659,17 +659,28 @@ Glossary `Domain Model (via Wikipedia) `_ unit of work - This pattern is where the system transparently keeps - track of changes to objects and periodically flushes all those - pending changes out to the database. SQLAlchemy's Session - implements this pattern fully in a manner similar to that of - Hibernate. + A software architecture where a persistence system such as an object + relational mapper maintains a list of changes made to a series of + objects, and periodically flushes all those pending changes out to the + database. + + SQLAlchemy's :class:`_orm.Session` implements the unit of work pattern, + where objects that are added to the :class:`_orm.Session` using methods + like :meth:`_orm.Session.add` will then participate in unit-of-work + style persistence. 
+ + For a walk-through of what unit of work persistence looks like in + SQLAlchemy, start with the section :ref:`tutorial_orm_data_manipulation` + in the :ref:`unified_tutorial`. Then for more detail, see + :ref:`session_basics` in the general reference documentation. .. seealso:: `Unit of Work (via Martin Fowler) `_ - :doc:`orm/session` + :ref:`tutorial_orm_data_manipulation` + + :ref:`session_basics` expire expired diff --git a/doc/build/orm/session_basics.rst b/doc/build/orm/session_basics.rst index 2815492dd50..16b2cae5f81 100644 --- a/doc/build/orm/session_basics.rst +++ b/doc/build/orm/session_basics.rst @@ -314,6 +314,7 @@ via standard methods such as :meth:`_engine.Result.all`, :ref:`migration_20_toplevel` +.. _session_adding: Adding New or Existing Items @@ -343,6 +344,7 @@ The :meth:`~.Session.add` operation **cascades** along the ``save-update`` cascade. For more details see the section :ref:`unitofwork_cascades`. +.. _session_deleting: Deleting -------- diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index 96a273a3598..4b05381db20 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -2603,13 +2603,28 @@ def _remove_newly_deleted(self, states): persistent_to_deleted(self, state) def add(self, instance, _warn=True): - """Place an object in the ``Session``. + """Place an object into this :class:`_orm.Session`. - Its state will be persisted to the database on the next flush - operation. + Objects that are in the :term:`transient` state when passed to the + :meth:`_orm.Session.add` method will move to the + :term:`pending` state, until the next flush, at which point they + will move to the :term:`persistent` state. - Repeated calls to ``add()`` will be ignored. The opposite of ``add()`` - is ``expunge()``. + Objects that are in the :term:`detached` state when passed to the + :meth:`_orm.Session.add` method will move to the :term:`persistent` + state directly. 
+ + If the transaction used by the :class:`_orm.Session` is rolled back, + objects which were transient when they were passed to + :meth:`_orm.Session.add` will be moved back to the + :term:`transient` state, and will no longer be present within this + :class:`_orm.Session`. + + .. seealso:: + + :meth:`_orm.Session.add_all` + + :ref:`session_adding` - at :ref:`session_basics` """ if _warn and self._warn_on_events: @@ -2626,7 +2641,18 @@ def add(self, instance, _warn=True): self._save_or_update_state(state) def add_all(self, instances): - """Add the given collection of instances to this ``Session``.""" + """Add the given collection of instances to this :class:`_orm.Session`. + + See the documentation for :meth:`_orm.Session.add` for a general + behavioral description. + + .. seealso:: + + :meth:`_orm.Session.add` + + :ref:`session_adding` - at :ref:`session_basics` + + """ if self._warn_on_events: self._flush_warning("Session.add_all()") @@ -2647,7 +2673,22 @@ def _save_or_update_state(self, state): def delete(self, instance): """Mark an instance as deleted. - The database delete operation occurs upon ``flush()``. + The object is assumed to be either :term:`persistent` or + :term:`detached` when passed; after the method is called, the + object will remain in the :term:`persistent` state until the next + flush proceeds. During this time, the object will also be a member + of the :attr:`_orm.Session.deleted` collection. + + When the next flush proceeds, the object will move to the + :term:`deleted` state, indicating a ``DELETE`` statement was emitted + for its row within the current transaction. When the transaction + is successfully committed, + the deleted object is moved to the :term:`detached` state and is + no longer present within this :class:`_orm.Session`. + + .. 
seealso:: + + :ref:`session_deleting` - at :ref:`session_basics` """ if self._warn_on_events: From c41b83afb58a445c0a97bf030e1e3b81c1f507f0 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 2 Oct 2022 14:04:43 -0400 Subject: [PATCH 385/632] experiment w/ docs formatter on SQLA 1.4 Enhanced the "{sql}" thing some more so that it maintains these tags exactly as they were. Note that the "{sql}" and "{stop}" tags are intended to be on the Python code lines, not the SQL lines, so special handling to find these, preserve them, then add them back after python code is formatted is added here. Change-Id: I07acd3ea54608cd63bee8003679f8dff131a90f4 --- doc/build/changelog/changelog_04.rst | 4 +- doc/build/changelog/changelog_08.rst | 11 +- doc/build/changelog/changelog_09.rst | 8 +- doc/build/changelog/migration_04.rst | 156 +++-- doc/build/changelog/migration_05.rst | 55 +- doc/build/changelog/migration_06.rst | 71 +- doc/build/changelog/migration_07.rst | 67 +- doc/build/changelog/migration_08.rst | 344 ++++----- doc/build/changelog/migration_09.rst | 222 +++--- doc/build/changelog/migration_10.rst | 411 ++++++----- doc/build/changelog/migration_11.rst | 448 ++++++------ doc/build/changelog/migration_12.rst | 251 +++---- doc/build/changelog/migration_13.rst | 206 +++--- doc/build/changelog/migration_14.rst | 183 ++--- doc/build/changelog/migration_20.rst | 300 ++++---- doc/build/changelog/unreleased_14/8525.rst | 2 +- doc/build/changelog/unreleased_14/8569.rst | 2 +- doc/build/core/connections.rst | 108 ++- doc/build/core/constraints.rst | 352 +++++----- doc/build/core/custom_types.rst | 111 +-- doc/build/core/ddl.rst | 61 +- doc/build/core/defaults.rst | 144 ++-- doc/build/core/engines.rst | 80 +-- doc/build/core/event.rst | 31 +- doc/build/core/functions.rst | 2 +- doc/build/core/future.rst | 1 + doc/build/core/metadata.rst | 121 ++-- doc/build/core/operators.rst | 203 +++--- doc/build/core/pooling.rst | 42 +- doc/build/core/reflection.rst | 50 +- 
doc/build/core/tutorial.rst | 663 +++++++++--------- doc/build/core/type_basics.rst | 6 +- doc/build/dialects/mssql.rst | 38 +- doc/build/dialects/mysql.rst | 42 +- doc/build/dialects/oracle.rst | 25 +- doc/build/dialects/postgresql.rst | 50 +- doc/build/dialects/sqlite.rst | 21 +- doc/build/errors.rst | 120 ++-- doc/build/faq/connections.rst | 23 +- doc/build/faq/metadata_schema.rst | 5 +- doc/build/faq/ormconfiguration.rst | 40 +- doc/build/faq/performance.rst | 20 +- doc/build/faq/sessions.rst | 31 +- doc/build/faq/sqlexpressions.rst | 40 +- doc/build/faq/thirdparty.rst | 13 +- doc/build/glossary.rst | 68 +- doc/build/intro.rst | 2 +- doc/build/orm/basic_relationships.rst | 5 +- doc/build/orm/cascades.rst | 35 +- doc/build/orm/collections.rst | 13 +- doc/build/orm/composites.rst | 24 +- doc/build/orm/contextual.rst | 1 + doc/build/orm/dataclasses.rst | 92 +-- doc/build/orm/declarative_config.rst | 25 +- doc/build/orm/declarative_mixins.rst | 29 +- doc/build/orm/declarative_styles.rst | 2 +- doc/build/orm/declarative_tables.rst | 16 +- doc/build/orm/extensions/associationproxy.rst | 20 +- doc/build/orm/extensions/asyncio.rst | 17 +- doc/build/orm/extensions/baked.rst | 6 +- .../orm/extensions/declarative/mixins.rst | 2 +- doc/build/orm/extensions/mypy.rst | 17 +- doc/build/orm/inheritance.rst | 4 +- doc/build/orm/inheritance_loading.rst | 221 +++--- doc/build/orm/join_conditions.rst | 251 ++++--- doc/build/orm/loading.rst | 2 +- doc/build/orm/loading_columns.rst | 71 +- doc/build/orm/loading_relationships.rst | 217 +++--- doc/build/orm/mapped_attributes.rst | 59 +- doc/build/orm/mapped_sql_expr.rst | 121 ++-- doc/build/orm/mapping_columns.rst | 47 +- doc/build/orm/mapping_styles.rst | 52 +- doc/build/orm/nonstandard_mappings.rst | 55 +- doc/build/orm/persistence_techniques.rst | 139 ++-- doc/build/orm/queryguide.rst | 163 +++-- doc/build/orm/quickstart.rst | 12 +- doc/build/orm/relationship_persistence.rst | 90 +-- doc/build/orm/self_referential.rst | 49 +- 
doc/build/orm/session_basics.rst | 52 +- doc/build/orm/session_events.rst | 39 +- doc/build/orm/session_state_management.rst | 28 +- doc/build/orm/session_transaction.rst | 101 ++- doc/build/orm/tutorial.rst | 343 ++++----- doc/build/orm/versioning.rst | 36 +- doc/build/tutorial/data_insert.rst | 24 +- doc/build/tutorial/data_select.rst | 317 ++++----- doc/build/tutorial/data_update.rst | 105 ++- doc/build/tutorial/dbapi_transactions.rst | 21 +- doc/build/tutorial/metadata.rst | 24 +- doc/build/tutorial/orm_data_manipulation.rst | 12 +- doc/build/tutorial/orm_related_objects.rst | 118 ++-- tools/format_docs_code.py | 395 +++++++++++ 92 files changed, 4784 insertions(+), 3942 deletions(-) create mode 100644 tools/format_docs_code.py diff --git a/doc/build/changelog/changelog_04.rst b/doc/build/changelog/changelog_04.rst index 9261c1262bc..10e632c93cf 100644 --- a/doc/build/changelog/changelog_04.rst +++ b/doc/build/changelog/changelog_04.rst @@ -540,9 +540,7 @@ to work for subclasses, if they are present, for example:: - sess.query(Company).options( - eagerload_all( - )) + sess.query(Company).options(eagerload_all()) to load Company objects, their employees, and the 'machines' collection of employees who happen to be diff --git a/doc/build/changelog/changelog_08.rst b/doc/build/changelog/changelog_08.rst index f6be2e3e19c..3bf8f67f207 100644 --- a/doc/build/changelog/changelog_08.rst +++ b/doc/build/changelog/changelog_08.rst @@ -970,7 +970,7 @@ del_ = delete(SomeMappedClass).where(SomeMappedClass.id == 5) - upd = update(SomeMappedClass).where(SomeMappedClass.id == 5).values(name='ed') + upd = update(SomeMappedClass).where(SomeMappedClass.id == 5).values(name="ed") .. 
change:: :tags: bug, orm @@ -2079,8 +2079,7 @@ to the original, older use case for :meth:`_query.Query.select_from`, which is that of restating the mapped entity in terms of a different selectable:: - session.query(User.name).\ - select_from(user_table.select().where(user_table.c.id > 5)) + session.query(User.name).select_from(user_table.select().where(user_table.c.id > 5)) Which produces:: @@ -2281,11 +2280,11 @@ original. Allows symmetry when using :class:`_engine.Engine` and :class:`_engine.Connection` objects as context managers:: - with conn.connect() as c: # leaves the Connection open - c.execute("...") + with conn.connect() as c: # leaves the Connection open + c.execute("...") with engine.connect() as c: # closes the Connection - c.execute("...") + c.execute("...") .. change:: :tags: engine diff --git a/doc/build/changelog/changelog_09.rst b/doc/build/changelog/changelog_09.rst index c9ec5f3a49a..d00e043326e 100644 --- a/doc/build/changelog/changelog_09.rst +++ b/doc/build/changelog/changelog_09.rst @@ -1708,15 +1708,15 @@ ad-hoc keyword arguments within the :attr:`.Index.kwargs` collection, after construction:: - idx = Index('a', 'b') - idx.kwargs['mysql_someargument'] = True + idx = Index("a", "b") + idx.kwargs["mysql_someargument"] = True To suit the use case of allowing custom arguments at construction time, the :meth:`.DialectKWArgs.argument_for` method now allows this registration:: - Index.argument_for('mysql', 'someargument', False) + Index.argument_for("mysql", "someargument", False) - idx = Index('a', 'b', mysql_someargument=True) + idx = Index("a", "b", mysql_someargument=True) .. 
seealso:: diff --git a/doc/build/changelog/migration_04.rst b/doc/build/changelog/migration_04.rst index b5031340794..93a2b654fbc 100644 --- a/doc/build/changelog/migration_04.rst +++ b/doc/build/changelog/migration_04.rst @@ -27,7 +27,7 @@ Secondly, anywhere you used to say ``engine=``, :: - myengine = create_engine('sqlite://') + myengine = create_engine("sqlite://") meta = MetaData(myengine) @@ -56,6 +56,7 @@ In 0.3, this code worked: from sqlalchemy import * + class UTCDateTime(types.TypeDecorator): pass @@ -66,6 +67,7 @@ In 0.4, one must do: from sqlalchemy import * from sqlalchemy import types + class UTCDateTime(types.TypeDecorator): pass @@ -119,7 +121,7 @@ when working with mapped classes: :: - session.query(User).filter(and_(User.name == 'fred', User.id > 17)) + session.query(User).filter(and_(User.name == "fred", User.id > 17)) While simple column-based comparisons are no big deal, the class attributes have some new "higher level" constructs @@ -139,18 +141,18 @@ available, including what was previously only available in # return all users who contain a particular address with # the email_address like '%foo%' - filter(User.addresses.any(Address.email_address.like('%foo%'))) + filter(User.addresses.any(Address.email_address.like("%foo%"))) # same, email address equals 'foo@bar.com'. can fall back to keyword # args for simple comparisons - filter(User.addresses.any(email_address = 'foo@bar.com')) + filter(User.addresses.any(email_address="foo@bar.com")) # return all Addresses whose user attribute has the username 'ed' - filter(Address.user.has(name='ed')) + filter(Address.user.has(name="ed")) # return all Addresses whose user attribute has the username 'ed' # and an id > 5 (mixing clauses with kwargs) - filter(Address.user.has(User.id > 5, name='ed')) + filter(Address.user.has(User.id > 5, name="ed")) The ``Column`` collection remains available on mapped classes in the ``.c`` attribute. 
Note that property-based @@ -199,12 +201,20 @@ any ``Alias`` objects: :: # standard self-referential TreeNode mapper with backref - mapper(TreeNode, tree_nodes, properties={ - 'children':relation(TreeNode, backref=backref('parent', remote_side=tree_nodes.id)) - }) + mapper( + TreeNode, + tree_nodes, + properties={ + "children": relation( + TreeNode, backref=backref("parent", remote_side=tree_nodes.id) + ) + }, + ) # query for node with child containing "bar" two levels deep - session.query(TreeNode).join(["children", "children"], aliased=True).filter_by(name='bar') + session.query(TreeNode).join(["children", "children"], aliased=True).filter_by( + name="bar" + ) To add criterion for each table along the way in an aliased join, you can use ``from_joinpoint`` to keep joining against @@ -215,15 +225,15 @@ the same line of aliases: # search for the treenode along the path "n1/n12/n122" # first find a Node with name="n122" - q = sess.query(Node).filter_by(name='n122') + q = sess.query(Node).filter_by(name="n122") # then join to parent with "n12" - q = q.join('parent', aliased=True).filter_by(name='n12') + q = q.join("parent", aliased=True).filter_by(name="n12") # join again to the next parent with 'n1'. use 'from_joinpoint' # so we join from the previous point, instead of joining off the # root table - q = q.join('parent', aliased=True, from_joinpoint=True).filter_by(name='n1') + q = q.join("parent", aliased=True, from_joinpoint=True).filter_by(name="n1") node = q.first() @@ -271,17 +281,24 @@ deep you want to go. 
Lets show the self-referential :: - nodes = Table('nodes', metadata, - Column('id', Integer, primary_key=True), - Column('parent_id', Integer, ForeignKey('nodes.id')), - Column('name', String(30))) + nodes = Table( + "nodes", + metadata, + Column("id", Integer, primary_key=True), + Column("parent_id", Integer, ForeignKey("nodes.id")), + Column("name", String(30)), + ) + class TreeNode(object): pass - mapper(TreeNode, nodes, properties={ - 'children':relation(TreeNode, lazy=False, join_depth=3) - }) + + mapper( + TreeNode, + nodes, + properties={"children": relation(TreeNode, lazy=False, join_depth=3)}, + ) So what happens when we say: @@ -324,10 +341,13 @@ new type, ``Point``. Stores an x/y coordinate: def __init__(self, x, y): self.x = x self.y = y + def __composite_values__(self): return self.x, self.y + def __eq__(self, other): return other.x == self.x and other.y == self.y + def __ne__(self, other): return not self.__eq__(other) @@ -341,13 +361,15 @@ Let's create a table of vertices storing two points per row: :: - vertices = Table('vertices', metadata, - Column('id', Integer, primary_key=True), - Column('x1', Integer), - Column('y1', Integer), - Column('x2', Integer), - Column('y2', Integer), - ) + vertices = Table( + "vertices", + metadata, + Column("id", Integer, primary_key=True), + Column("x1", Integer), + Column("y1", Integer), + Column("x2", Integer), + Column("y2", Integer), + ) Then, map it ! 
We'll create a ``Vertex`` object which stores two ``Point`` objects: @@ -359,10 +381,15 @@ stores two ``Point`` objects: self.start = start self.end = end - mapper(Vertex, vertices, properties={ - 'start':composite(Point, vertices.c.x1, vertices.c.y1), - 'end':composite(Point, vertices.c.x2, vertices.c.y2) - }) + + mapper( + Vertex, + vertices, + properties={ + "start": composite(Point, vertices.c.x1, vertices.c.y1), + "end": composite(Point, vertices.c.x2, vertices.c.y2), + }, + ) Once you've set up your composite type, it's usable just like any other type: @@ -370,7 +397,7 @@ like any other type: :: - v = Vertex(Point(3, 4), Point(26,15)) + v = Vertex(Point(3, 4), Point(26, 15)) session.save(v) session.flush() @@ -388,7 +415,7 @@ work as primary keys too, and are usable in ``query.get()``: # a Document class which uses a composite Version # object as primary key - document = query.get(Version(1, 'a')) + document = query.get(Version(1, "a")) ``dynamic_loader()`` relations ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -438,16 +465,12 @@ eager in one pass: :: - mapper(Foo, foo_table, properties={ - 'bar':relation(Bar) - }) - mapper(Bar, bar_table, properties={ - 'bat':relation(Bat) - }) + mapper(Foo, foo_table, properties={"bar": relation(Bar)}) + mapper(Bar, bar_table, properties={"bat": relation(Bat)}) mapper(Bat, bat_table) # eager load bar and bat - session.query(Foo).options(eagerload_all('bar.bat')).filter(...).all() + session.query(Foo).options(eagerload_all("bar.bat")).filter(...).all() New Collection API ^^^^^^^^^^^^^^^^^^ @@ -471,7 +494,7 @@ many needs: # use a dictionary relation keyed by a column relation(Item, collection_class=column_mapped_collection(items.c.keyword)) # or named attribute - relation(Item, collection_class=attribute_mapped_collection('keyword')) + relation(Item, collection_class=attribute_mapped_collection("keyword")) # or any function you like relation(Item, collection_class=mapped_collection(lambda entity: entity.a + entity.b)) @@ -493,12 +516,20 
@@ columns or subqueries: :: - mapper(User, users, properties={ - 'fullname': column_property((users.c.firstname + users.c.lastname).label('fullname')), - 'numposts': column_property( - select([func.count(1)], users.c.id==posts.c.user_id).correlate(users).label('posts') - ) - }) + mapper( + User, + users, + properties={ + "fullname": column_property( + (users.c.firstname + users.c.lastname).label("fullname") + ), + "numposts": column_property( + select([func.count(1)], users.c.id == posts.c.user_id) + .correlate(users) + .label("posts") + ), + }, + ) a typical query looks like: @@ -534,7 +565,7 @@ your ``engine`` (or anywhere): from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker - engine = create_engine('myengine://') + engine = create_engine("myengine://") Session = sessionmaker(bind=engine, autoflush=True, transactional=True) # use the new Session() freely @@ -542,7 +573,6 @@ your ``engine`` (or anywhere): sess.save(someobject) sess.flush() - If you need to post-configure your Session, say with an engine, add it later with ``configure()``: @@ -562,7 +592,7 @@ with both ``sessionmaker`` as well as ``create_session()``: Session = scoped_session(sessionmaker(autoflush=True, transactional=True)) Session.configure(bind=engine) - u = User(name='wendy') + u = User(name="wendy") sess = Session() sess.save(u) @@ -573,7 +603,6 @@ with both ``sessionmaker`` as well as ``create_session()``: sess2 = Session() assert sess is sess2 - When using a thread-local ``Session``, the returned class has all of ``Session's`` interface implemented as classmethods, and "assignmapper"'s functionality is @@ -586,11 +615,10 @@ old ``objectstore`` days.... 
# "assignmapper"-like functionality available via ScopedSession.mapper Session.mapper(User, users_table) - u = User(name='wendy') + u = User(name="wendy") Session.commit() - Sessions are again Weak Referencing By Default ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -624,13 +652,13 @@ Also, ``autoflush=True`` means the ``Session`` will Session = sessionmaker(bind=engine, autoflush=True, transactional=True) - u = User(name='wendy') + u = User(name="wendy") sess = Session() sess.save(u) # wendy is flushed, comes right back from a query - wendy = sess.query(User).filter_by(name='wendy').one() + wendy = sess.query(User).filter_by(name="wendy").one() Transactional methods moved onto sessions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -649,7 +677,7 @@ background). # use the session - sess.commit() # commit transaction + sess.commit() # commit transaction Sharing a ``Session`` with an enclosing engine-level (i.e. non-ORM) transaction is easy: @@ -745,7 +773,7 @@ Just like it says: :: - b = bindparam('foo', type_=String) + b = bindparam("foo", type_=String) in\_ Function Changed to Accept Sequence or Selectable ------------------------------------------------------ @@ -847,8 +875,18 @@ Out Parameters for Oracle :: - result = engine.execute(text("begin foo(:x, :y, :z); end;", bindparams=[bindparam('x', Numeric), outparam('y', Numeric), outparam('z', Numeric)]), x=5) - assert result.out_parameters == {'y':10, 'z':75} + result = engine.execute( + text( + "begin foo(:x, :y, :z); end;", + bindparams=[ + bindparam("x", Numeric), + outparam("y", Numeric), + outparam("z", Numeric), + ], + ), + x=5, + ) + assert result.out_parameters == {"y": 10, "z": 75} Connection-bound ``MetaData``, ``Sessions`` ------------------------------------------- diff --git a/doc/build/changelog/migration_05.rst b/doc/build/changelog/migration_05.rst index 64b69e15230..3d7bb52df30 100644 --- a/doc/build/changelog/migration_05.rst +++ b/doc/build/changelog/migration_05.rst @@ -64,15 +64,21 @@ 
Object Relational Mapping :: - session.query(User.name, func.count(Address.id).label("numaddresses")).join(Address).group_by(User.name) + session.query(User.name, func.count(Address.id).label("numaddresses")).join( + Address + ).group_by(User.name) The tuples returned by any multi-column/entity query are *named*' tuples: :: - for row in session.query(User.name, func.count(Address.id).label('numaddresses')).join(Address).group_by(User.name): - print("name", row.name, "number", row.numaddresses) + for row in ( + session.query(User.name, func.count(Address.id).label("numaddresses")) + .join(Address) + .group_by(User.name) + ): + print("name", row.name, "number", row.numaddresses) ``Query`` has a ``statement`` accessor, as well as a ``subquery()`` method which allow ``Query`` to be used to @@ -80,10 +86,15 @@ Object Relational Mapping :: - subq = session.query(Keyword.id.label('keyword_id')).filter(Keyword.name.in_(['beans', 'carrots'])).subquery() - recipes = session.query(Recipe).filter(exists(). - where(Recipe.id==recipe_keywords.c.recipe_id). 
- where(recipe_keywords.c.keyword_id==subq.c.keyword_id) + subq = ( + session.query(Keyword.id.label("keyword_id")) + .filter(Keyword.name.in_(["beans", "carrots"])) + .subquery() + ) + recipes = session.query(Recipe).filter( + exists() + .where(Recipe.id == recipe_keywords.c.recipe_id) + .where(recipe_keywords.c.keyword_id == subq.c.keyword_id) ) * **Explicit ORM aliases are recommended for aliased joins** @@ -223,17 +234,24 @@ Object Relational Mapping :: - mapper(User, users, properties={ - 'addresses':relation(Address, order_by=addresses.c.id) - }, order_by=users.c.id) + mapper( + User, + users, + properties={"addresses": relation(Address, order_by=addresses.c.id)}, + order_by=users.c.id, + ) To set ordering on a backref, use the ``backref()`` function: :: - 'keywords':relation(Keyword, secondary=item_keywords, - order_by=keywords.c.name, backref=backref('items', order_by=items.c.id)) + "keywords": relation( + Keyword, + secondary=item_keywords, + order_by=keywords.c.name, + backref=backref("items", order_by=items.c.id), + ) Using declarative ? To help with the new ``order_by`` requirement, ``order_by`` and friends can now be set using @@ -244,7 +262,7 @@ Object Relational Mapping class MyClass(MyDeclarativeBase): ... 
- 'addresses':relation("Address", order_by="Address.id") + "addresses": relation("Address", order_by="Address.id") It's generally a good idea to set ``order_by`` on ``relation()s`` which load list-based collections of @@ -402,14 +420,17 @@ Schema/Types convert_result_value methods """ + def bind_processor(self, dialect): def convert(value): return self.convert_bind_param(value, dialect) + return convert def result_processor(self, dialect): def convert(value): return self.convert_result_value(value, dialect) + return convert def convert_result_value(self, value, dialect): @@ -461,10 +482,10 @@ Schema/Types dt = datetime.datetime(2008, 6, 27, 12, 0, 0, 125) # 125 usec # old way - '2008-06-27 12:00:00.125' + "2008-06-27 12:00:00.125" # new way - '2008-06-27 12:00:00.000125' + "2008-06-27 12:00:00.000125" So if an existing SQLite file-based database intends to be used across 0.4 and 0.5, you either have to upgrade the @@ -481,6 +502,7 @@ Schema/Types :: from sqlalchemy.databases.sqlite import DateTimeMixin + DateTimeMixin.__legacy_microseconds__ = True Connection Pool no longer threadlocal by default @@ -522,7 +544,7 @@ data-driven, it takes ``[args]``. 
:: - query.join('orders', 'items') + query.join("orders", "items") query.join(User.orders, Order.items) * the ``in_()`` method on columns and similar only accepts a @@ -605,6 +627,7 @@ Removed :: from sqlalchemy.orm import aliased + address_alias = aliased(Address) print(session.query(User, address_alias).join((address_alias, User.addresses)).all()) diff --git a/doc/build/changelog/migration_06.rst b/doc/build/changelog/migration_06.rst index 0867fefe029..73c57bd9311 100644 --- a/doc/build/changelog/migration_06.rst +++ b/doc/build/changelog/migration_06.rst @@ -73,7 +73,7 @@ will use psycopg2: :: - create_engine('postgresql://scott:tiger@localhost/test') + create_engine("postgresql://scott:tiger@localhost/test") However to specify a specific DBAPI backend such as pg8000, add it to the "protocol" section of the URL using a plus @@ -81,7 +81,7 @@ sign "+": :: - create_engine('postgresql+pg8000://scott:tiger@localhost/test') + create_engine("postgresql+pg8000://scott:tiger@localhost/test") Important Dialect Links: @@ -138,8 +138,15 @@ set of PG types: :: - from sqlalchemy.dialects.postgresql import INTEGER, BIGINT, SMALLINT,\ - VARCHAR, MACADDR, DATE, BYTEA + from sqlalchemy.dialects.postgresql import ( + INTEGER, + BIGINT, + SMALLINT, + VARCHAR, + MACADDR, + DATE, + BYTEA, + ) Above, ``INTEGER`` is actually the plain ``INTEGER`` type from ``sqlalchemy.types``, but the PG dialect makes it @@ -164,7 +171,7 @@ object returns another ``ClauseElement``: :: >>> from sqlalchemy.sql import column - >>> column('foo') == 5 + >>> column("foo") == 5 This so that Python expressions produce SQL expressions when @@ -172,16 +179,15 @@ converted to strings: :: - >>> str(column('foo') == 5) + >>> str(column("foo") == 5) 'foo = :foo_1' But what happens if we say this? :: - >>> if column('foo') == 5: + >>> if column("foo") == 5: ... print("yes") - ... 
In previous versions of SQLAlchemy, the returned ``_BinaryExpression`` was a plain Python object which @@ -191,11 +197,11 @@ as to that being compared. Meaning: :: - >>> bool(column('foo') == 5) + >>> bool(column("foo") == 5) False - >>> bool(column('foo') == column('foo')) + >>> bool(column("foo") == column("foo")) False - >>> c = column('foo') + >>> c = column("foo") >>> bool(c == c) True >>> @@ -252,7 +258,7 @@ sets: :: - connection.execute(table.insert(), {'data':'row1'}, {'data':'row2'}, {'data':'row3'}) + connection.execute(table.insert(), {"data": "row1"}, {"data": "row2"}, {"data": "row3"}) When the ``Connection`` object sends off the given ``insert()`` construct for compilation, it passes to the @@ -268,10 +274,12 @@ works: :: - connection.execute(table.insert(), - {'timestamp':today, 'data':'row1'}, - {'timestamp':today, 'data':'row2'}, - {'data':'row3'}) + connection.execute( + table.insert(), + {"timestamp": today, "data": "row1"}, + {"timestamp": today, "data": "row2"}, + {"data": "row3"}, + ) Because the third row does not specify the 'timestamp' column. 
Previous versions of SQLAlchemy would simply insert @@ -392,7 +400,7 @@ with tables or metadata objects: from sqlalchemy.schema import DDL - DDL('CREATE TRIGGER users_trigger ...').execute_at('after-create', metadata) + DDL("CREATE TRIGGER users_trigger ...").execute_at("after-create", metadata) Now the full suite of DDL constructs are available under the same system, including those for CREATE TABLE, ADD @@ -402,7 +410,7 @@ CONSTRAINT, etc.: from sqlalchemy.schema import Constraint, AddConstraint - AddContraint(CheckConstraint("value > 5")).execute_at('after-create', mytable) + AddContraint(CheckConstraint("value > 5")).execute_at("after-create", mytable) Additionally, all the DDL objects are now regular ``ClauseElement`` objects just like any other SQLAlchemy @@ -428,20 +436,22 @@ make your own: from sqlalchemy.schema import DDLElement from sqlalchemy.ext.compiler import compiles - class AlterColumn(DDLElement): + class AlterColumn(DDLElement): def __init__(self, column, cmd): self.column = column self.cmd = cmd + @compiles(AlterColumn) def visit_alter_column(element, compiler, **kw): return "ALTER TABLE %s ALTER COLUMN %s %s ..." 
% ( element.column.table.name, element.column.name, - element.cmd + element.cmd, ) + engine.execute(AlterColumn(table.c.mycolumn, "SET DEFAULT 'test'")) Deprecated/Removed Schema Elements @@ -566,6 +576,7 @@ To use an inspector: :: from sqlalchemy.engine.reflection import Inspector + insp = Inspector.from_engine(my_engine) print(insp.get_schema_names()) @@ -578,10 +589,10 @@ such as that of PostgreSQL which provides a :: - my_engine = create_engine('postgresql://...') + my_engine = create_engine("postgresql://...") pg_insp = Inspector.from_engine(my_engine) - print(pg_insp.get_table_oid('my_table')) + print(pg_insp.get_table_oid("my_table")) RETURNING Support ================= @@ -600,10 +611,10 @@ columns will be returned as a regular result set: result = connection.execute( - table.insert().values(data='some data').returning(table.c.id, table.c.timestamp) - ) + table.insert().values(data="some data").returning(table.c.id, table.c.timestamp) + ) row = result.first() - print("ID:", row['id'], "Timestamp:", row['timestamp']) + print("ID:", row["id"], "Timestamp:", row["timestamp"]) The implementation of RETURNING across the four supported backends varies wildly, in the case of Oracle requiring an @@ -740,7 +751,7 @@ that converts unicode back to utf-8, or whatever is desired: def process_result_value(self, value, dialect): if isinstance(value, unicode): - value = value.encode('utf-8') + value = value.encode("utf-8") return value Note that the ``assert_unicode`` flag is now deprecated. 
@@ -968,9 +979,11 @@ At mapper level: :: mapper(Child, child) - mapper(Parent, parent, properties={ - 'child':relationship(Child, lazy='joined', innerjoin=True) - }) + mapper( + Parent, + parent, + properties={"child": relationship(Child, lazy="joined", innerjoin=True)}, + ) At query time level: diff --git a/doc/build/changelog/migration_07.rst b/doc/build/changelog/migration_07.rst index a222f5380bd..4763b9134c4 100644 --- a/doc/build/changelog/migration_07.rst +++ b/doc/build/changelog/migration_07.rst @@ -244,7 +244,7 @@ with an explicit onclause is now: :: - query.join(SomeClass, SomeClass.id==ParentClass.some_id) + query.join(SomeClass, SomeClass.id == ParentClass.some_id) In 0.6, this usage was considered to be an error, because ``join()`` accepts multiple arguments corresponding to @@ -336,10 +336,12 @@ to the creation of the index outside of the Table. That is: :: - Table('mytable', metadata, - Column('id',Integer, primary_key=True), - Column('name', String(50), nullable=False), - Index('idx_name', 'name') + Table( + "mytable", + metadata, + Column("id", Integer, primary_key=True), + Column("name", String(50), nullable=False), + Index("idx_name", "name"), ) The primary rationale here is for the benefit of declarative @@ -348,14 +350,16 @@ The primary rationale here is for the benefit of declarative :: class HasNameMixin(object): - name = Column('name', String(50), nullable=False) + name = Column("name", String(50), nullable=False) + @declared_attr def __table_args__(cls): - return (Index('name'), {}) + return (Index("name"), {}) + class User(HasNameMixin, Base): - __tablename__ = 'user' - id = Column('id', Integer, primary_key=True) + __tablename__ = "user" + id = Column("id", Integer, primary_key=True) `Indexes `_ @@ -385,17 +389,16 @@ tutorial: from sqlalchemy.sql import table, column, select, func - empsalary = table('empsalary', - column('depname'), - column('empno'), - column('salary')) + empsalary = table("empsalary", column("depname"), 
column("empno"), column("salary")) - s = select([ + s = select( + [ empsalary, - func.avg(empsalary.c.salary). - over(partition_by=empsalary.c.depname). - label('avg') - ]) + func.avg(empsalary.c.salary) + .over(partition_by=empsalary.c.depname) + .label("avg"), + ] + ) print(s) @@ -495,7 +498,7 @@ equivalent to: :: - query.from_self(func.count(literal_column('1'))).scalar() + query.from_self(func.count(literal_column("1"))).scalar() Previously, internal logic attempted to rewrite the columns clause of the query itself, and upon detection of a @@ -534,6 +537,7 @@ be used: :: from sqlalchemy import func + session.query(func.count(MyClass.id)).scalar() or for ``count(*)``: @@ -541,7 +545,8 @@ or for ``count(*)``: :: from sqlalchemy import func, literal_column - session.query(func.count(literal_column('*'))).select_from(MyClass).scalar() + + session.query(func.count(literal_column("*"))).select_from(MyClass).scalar() LIMIT/OFFSET clauses now use bind parameters -------------------------------------------- @@ -690,8 +695,11 @@ function, can be mapped. from sqlalchemy import select, func from sqlalchemy.orm import mapper + class Subset(object): pass + + selectable = select(["x", "y", "z"]).select_from(func.some_db_function()).alias() mapper(Subset, selectable, primary_key=[selectable.c.x]) @@ -773,10 +781,11 @@ mutations, the type object must be constructed with :: - Table('mytable', metadata, + Table( + "mytable", + metadata, # .... 
- - Column('pickled_data', PickleType(mutable=True)) + Column("pickled_data", PickleType(mutable=True)), ) The ``mutable=True`` flag is being phased out, in favor of @@ -1036,7 +1045,7 @@ key column ``id``, the following now produces an error: :: - foobar = foo.join(bar, foo.c.id==bar.c.foo_id) + foobar = foo.join(bar, foo.c.id == bar.c.foo_id) mapper(FooBar, foobar) This because the ``mapper()`` refuses to guess what column @@ -1047,10 +1056,8 @@ explicit: :: - foobar = foo.join(bar, foo.c.id==bar.c.foo_id) - mapper(FooBar, foobar, properties={ - 'id':[foo.c.id, bar.c.id] - }) + foobar = foo.join(bar, foo.c.id == bar.c.foo_id) + mapper(FooBar, foobar, properties={"id": [foo.c.id, bar.c.id]}) :ticket:`1896` @@ -1231,14 +1238,14 @@ backend: :: - select([mytable], distinct='ALL', prefixes=['HIGH_PRIORITY']) + select([mytable], distinct="ALL", prefixes=["HIGH_PRIORITY"]) The ``prefixes`` keyword or ``prefix_with()`` method should be used for non-standard or unusual prefixes: :: - select([mytable]).prefix_with('HIGH_PRIORITY', 'ALL') + select([mytable]).prefix_with("HIGH_PRIORITY", "ALL") ``useexisting`` superseded by ``extend_existing`` and ``keep_existing`` ----------------------------------------------------------------------- diff --git a/doc/build/changelog/migration_08.rst b/doc/build/changelog/migration_08.rst index a4dc58549ff..4a07518539b 100644 --- a/doc/build/changelog/migration_08.rst +++ b/doc/build/changelog/migration_08.rst @@ -71,16 +71,17 @@ entities. 
The new system includes these features: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) - child_id_one = Column(Integer, ForeignKey('child.id')) - child_id_two = Column(Integer, ForeignKey('child.id')) + child_id_one = Column(Integer, ForeignKey("child.id")) + child_id_two = Column(Integer, ForeignKey("child.id")) child_one = relationship("Child", foreign_keys=child_id_one) child_two = relationship("Child", foreign_keys=child_id_two) + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child" id = Column(Integer, primary_key=True) * relationships against self-referential, composite foreign @@ -90,11 +91,11 @@ entities. The new system includes these features: :: class Folder(Base): - __tablename__ = 'folder' + __tablename__ = "folder" __table_args__ = ( - ForeignKeyConstraint( - ['account_id', 'parent_id'], - ['folder.account_id', 'folder.folder_id']), + ForeignKeyConstraint( + ["account_id", "parent_id"], ["folder.account_id", "folder.folder_id"] + ), ) account_id = Column(Integer, primary_key=True) @@ -102,10 +103,9 @@ entities. The new system includes these features: parent_id = Column(Integer) name = Column(String) - parent_folder = relationship("Folder", - backref="child_folders", - remote_side=[account_id, folder_id] - ) + parent_folder = relationship( + "Folder", backref="child_folders", remote_side=[account_id, folder_id] + ) Above, the ``Folder`` refers to its parent ``Folder`` joining from ``account_id`` to itself, and ``parent_id`` @@ -144,18 +144,19 @@ entities. 
The new system includes these features: expected in most cases:: class HostEntry(Base): - __tablename__ = 'host_entry' + __tablename__ = "host_entry" id = Column(Integer, primary_key=True) ip_address = Column(INET) content = Column(String(50)) # relationship() using explicit foreign_keys, remote_side - parent_host = relationship("HostEntry", - primaryjoin=ip_address == cast(content, INET), - foreign_keys=content, - remote_side=ip_address - ) + parent_host = relationship( + "HostEntry", + primaryjoin=ip_address == cast(content, INET), + foreign_keys=content, + remote_side=ip_address, + ) The new :func:`_orm.relationship` mechanics make use of a SQLAlchemy concept known as :term:`annotations`. These annotations @@ -167,8 +168,9 @@ entities. The new system includes these features: from sqlalchemy.orm import foreign, remote + class HostEntry(Base): - __tablename__ = 'host_entry' + __tablename__ = "host_entry" id = Column(Integer, primary_key=True) ip_address = Column(INET) @@ -176,11 +178,10 @@ entities. The new system includes these features: # relationship() using explicit foreign() and remote() annotations # in lieu of separate arguments - parent_host = relationship("HostEntry", - primaryjoin=remote(ip_address) == \ - cast(foreign(content), INET), - ) - + parent_host = relationship( + "HostEntry", + primaryjoin=remote(ip_address) == cast(foreign(content), INET), + ) .. seealso:: @@ -226,12 +227,11 @@ certain contexts, such as :class:`.AliasedInsp` and A walkthrough of some key capabilities follows:: >>> class User(Base): - ... __tablename__ = 'user' + ... __tablename__ = "user" ... id = Column(Integer, primary_key=True) ... name = Column(String) ... name_syn = synonym(name) ... addresses = relationship("Address") - ... 
>>> # universal entry point is inspect() >>> b = inspect(User) @@ -285,7 +285,7 @@ A walkthrough of some key capabilities follows:: "user".id = address.user_id >>> # inspect works on instances - >>> u1 = User(id=3, name='x') + >>> u1 = User(id=3, name="x") >>> b = inspect(u1) >>> # it returns the InstanceState @@ -354,10 +354,11 @@ usable anywhere: :: from sqlalchemy.orm import with_polymorphic + palias = with_polymorphic(Person, [Engineer, Manager]) - session.query(Company).\ - join(palias, Company.employees).\ - filter(or_(Engineer.language=='java', Manager.hair=='pointy')) + session.query(Company).join(palias, Company.employees).filter( + or_(Engineer.language == "java", Manager.hair == "pointy") + ) .. seealso:: @@ -377,9 +378,11 @@ by combining it with the new :func:`.with_polymorphic` function:: # use eager loading in conjunction with with_polymorphic targets Job_P = with_polymorphic(Job, [SubJob, ExtraJob], aliased=True) - q = s.query(DataContainer).\ - join(DataContainer.jobs.of_type(Job_P)).\ - options(contains_eager(DataContainer.jobs.of_type(Job_P))) + q = ( + s.query(DataContainer) + .join(DataContainer.jobs.of_type(Job_P)) + .options(contains_eager(DataContainer.jobs.of_type(Job_P))) + ) The method now works equally well in most places a regular relationship attribute is accepted, including with loader functions like @@ -389,26 +392,28 @@ and :meth:`.PropComparator.has`:: # use eager loading in conjunction with with_polymorphic targets Job_P = with_polymorphic(Job, [SubJob, ExtraJob], aliased=True) - q = s.query(DataContainer).\ - join(DataContainer.jobs.of_type(Job_P)).\ - options(contains_eager(DataContainer.jobs.of_type(Job_P))) + q = ( + s.query(DataContainer) + .join(DataContainer.jobs.of_type(Job_P)) + .options(contains_eager(DataContainer.jobs.of_type(Job_P))) + ) # pass subclasses to eager loads (implicitly applies with_polymorphic) - q = s.query(ParentThing).\ - options( - joinedload_all( - ParentThing.container, - 
DataContainer.jobs.of_type(SubJob) - )) + q = s.query(ParentThing).options( + joinedload_all(ParentThing.container, DataContainer.jobs.of_type(SubJob)) + ) # control self-referential aliasing with any()/has() Job_A = aliased(Job) - q = s.query(Job).join(DataContainer.jobs).\ - filter( - DataContainer.jobs.of_type(Job_A).\ - any(and_(Job_A.id < Job.id, Job_A.type=='fred') - ) - ) + q = ( + s.query(Job) + .join(DataContainer.jobs) + .filter( + DataContainer.jobs.of_type(Job_A).any( + and_(Job_A.id < Job.id, Job_A.type == "fred") + ) + ) + ) .. seealso:: @@ -429,13 +434,15 @@ with a declarative base class:: Base = declarative_base() + @event.listens_for("load", Base, propagate=True) def on_load(target, context): print("New instance loaded:", target) + # on_load() will be applied to SomeClass class SomeClass(Base): - __tablename__ = 'sometable' + __tablename__ = "sometable" # ... @@ -453,8 +460,9 @@ can be referred to via dotted name in expressions:: class Snack(Base): # ... - peanuts = relationship("nuts.Peanut", - primaryjoin="nuts.Peanut.snack_id == Snack.id") + peanuts = relationship( + "nuts.Peanut", primaryjoin="nuts.Peanut.snack_id == Snack.id" + ) The resolution allows that any full or partial disambiguating package name can be used. 
If the @@ -484,17 +492,22 @@ in one step: class ReflectedOne(DeferredReflection, Base): __abstract__ = True + class ReflectedTwo(DeferredReflection, Base): __abstract__ = True + class MyClass(ReflectedOne): - __tablename__ = 'mytable' + __tablename__ = "mytable" + class MyOtherClass(ReflectedOne): - __tablename__ = 'myothertable' + __tablename__ = "myothertable" + class YetAnotherClass(ReflectedTwo): - __tablename__ = 'yetanothertable' + __tablename__ = "yetanothertable" + ReflectedOne.prepare(engine_one) ReflectedTwo.prepare(engine_two) @@ -535,10 +548,9 @@ Below, we emit an UPDATE against ``SomeEntity``, adding a FROM clause (or equivalent, depending on backend) against ``SomeOtherEntity``:: - query(SomeEntity).\ - filter(SomeEntity.id==SomeOtherEntity.id).\ - filter(SomeOtherEntity.foo=='bar').\ - update({"data":"x"}) + query(SomeEntity).filter(SomeEntity.id == SomeOtherEntity.id).filter( + SomeOtherEntity.foo == "bar" + ).update({"data": "x"}) In particular, updates to joined-inheritance entities are supported, provided the target of the UPDATE is local to the @@ -548,10 +560,9 @@ given ``Engineer`` as a joined subclass of ``Person``: :: - query(Engineer).\ - filter(Person.id==Engineer.id).\ - filter(Person.name=='dilbert').\ - update({"engineer_data":"java"}) + query(Engineer).filter(Person.id == Engineer.id).filter( + Person.name == "dilbert" + ).update({"engineer_data": "java"}) would produce: @@ -649,6 +660,7 @@ For example, to add logarithm support to :class:`.Numeric` types: from sqlalchemy.types import Numeric from sqlalchemy.sql import func + class CustomNumeric(Numeric): class comparator_factory(Numeric.Comparator): def log(self, other): @@ -659,16 +671,17 @@ The new type is usable like any other type: :: - data = Table('data', metadata, - Column('id', Integer, primary_key=True), - Column('x', CustomNumeric(10, 5)), - Column('y', CustomNumeric(10, 5)) - ) + data = Table( + "data", + metadata, + Column("id", Integer, primary_key=True), + Column("x", 
CustomNumeric(10, 5)), + Column("y", CustomNumeric(10, 5)), + ) stmt = select([data.c.x.log(data.c.y)]).where(data.c.x.log(2) < value) print(conn.execute(stmt).fetchall()) - New features which have come from this immediately include support for PostgreSQL's HSTORE type, as well as new operations associated with PostgreSQL's ARRAY @@ -696,11 +709,13 @@ support this syntax, including PostgreSQL, SQLite, and MySQL. It is not the same thing as the usual ``executemany()`` style of INSERT which remains unchanged:: - users.insert().values([ - {"name": "some name"}, - {"name": "some other name"}, - {"name": "yet another name"}, - ]) + users.insert().values( + [ + {"name": "some name"}, + {"name": "some other name"}, + {"name": "yet another name"}, + ] + ) .. seealso:: @@ -721,6 +736,7 @@ functionality, except on the database side:: from sqlalchemy.types import String from sqlalchemy import func, Table, Column, MetaData + class LowerString(String): def bind_expression(self, bindvalue): return func.lower(bindvalue) @@ -728,18 +744,15 @@ functionality, except on the database side:: def column_expression(self, col): return func.lower(col) + metadata = MetaData() - test_table = Table( - 'test_table', - metadata, - Column('data', LowerString) - ) + test_table = Table("test_table", metadata, Column("data", LowerString)) Above, the ``LowerString`` type defines a SQL expression that will be emitted whenever the ``test_table.c.data`` column is rendered in the columns clause of a SELECT statement:: - >>> print(select([test_table]).where(test_table.c.data == 'HI')) + >>> print(select([test_table]).where(test_table.c.data == "HI")) SELECT lower(test_table.data) AS data FROM test_table WHERE test_table.data = lower(:data_1) @@ -789,16 +802,17 @@ against a particular target selectable:: signatures = relationship("Signature", lazy=False) + class Signature(Base): __tablename__ = "signature" id = Column(Integer, primary_key=True) sig_count = column_property( - select([func.count('*')]).\ - 
where(SnortEvent.signature == id). - correlate_except(SnortEvent) - ) + select([func.count("*")]) + .where(SnortEvent.signature == id) + .correlate_except(SnortEvent) + ) .. seealso:: @@ -818,19 +832,16 @@ and containment methods such as from sqlalchemy.dialects.postgresql import HSTORE - data = Table('data_table', metadata, - Column('id', Integer, primary_key=True), - Column('hstore_data', HSTORE) - ) - - engine.execute( - select([data.c.hstore_data['some_key']]) - ).scalar() + data = Table( + "data_table", + metadata, + Column("id", Integer, primary_key=True), + Column("hstore_data", HSTORE), + ) - engine.execute( - select([data.c.hstore_data.matrix()]) - ).scalar() + engine.execute(select([data.c.hstore_data["some_key"]])).scalar() + engine.execute(select([data.c.hstore_data.matrix()])).scalar() .. seealso:: @@ -861,30 +872,20 @@ results: The type also introduces new operators, using the new type-specific operator framework. New operations include indexed access:: - result = conn.execute( - select([mytable.c.arraycol[2]]) - ) + result = conn.execute(select([mytable.c.arraycol[2]])) slice access in SELECT:: - result = conn.execute( - select([mytable.c.arraycol[2:4]]) - ) + result = conn.execute(select([mytable.c.arraycol[2:4]])) slice updates in UPDATE:: - conn.execute( - mytable.update().values({mytable.c.arraycol[2:3]: [7, 8]}) - ) + conn.execute(mytable.update().values({mytable.c.arraycol[2:3]: [7, 8]})) freestanding array literals:: >>> from sqlalchemy.dialects import postgresql - >>> conn.scalar( - ... select([ - ... postgresql.array([1, 2]) + postgresql.array([3, 4, 5]) - ... ]) - ... ) + >>> conn.scalar(select([postgresql.array([1, 2]) + postgresql.array([3, 4, 5])])) [1, 2, 3, 4, 5] array concatenation, where below, the right side ``[4, 5, 6]`` is coerced into an array literal:: @@ -912,20 +913,24 @@ everything else. 
:: - Column('sometimestamp', sqlite.DATETIME(truncate_microseconds=True)) - Column('sometimestamp', sqlite.DATETIME( - storage_format=( - "%(year)04d%(month)02d%(day)02d" - "%(hour)02d%(minute)02d%(second)02d%(microsecond)06d" - ), - regexp="(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d{6})" - ) - ) - Column('somedate', sqlite.DATE( - storage_format="%(month)02d/%(day)02d/%(year)04d", - regexp="(?P\d+)/(?P\d+)/(?P\d+)", - ) - ) + Column("sometimestamp", sqlite.DATETIME(truncate_microseconds=True)) + Column( + "sometimestamp", + sqlite.DATETIME( + storage_format=( + "%(year)04d%(month)02d%(day)02d" + "%(hour)02d%(minute)02d%(second)02d%(microsecond)06d" + ), + regexp="(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(\d{6})", + ), + ) + Column( + "somedate", + sqlite.DATE( + storage_format="%(month)02d/%(day)02d/%(year)04d", + regexp="(?P\d+)/(?P\d+)/(?P\d+)", + ), + ) Huge thanks to Nate Dub for the sprinting on this at Pycon 2012. @@ -946,7 +951,7 @@ The "collate" keyword, long accepted by the MySQL dialect, is now established on all :class:`.String` types and will render on any backend, including when features such as :meth:`_schema.MetaData.create_all` and :func:`.cast` is used:: - >>> stmt = select([cast(sometable.c.somechar, String(20, collation='utf8'))]) + >>> stmt = select([cast(sometable.c.somechar, String(20, collation="utf8"))]) >>> print(stmt) SELECT CAST(sometable.somechar AS VARCHAR(20) COLLATE "utf8") AS anon_1 FROM sometable @@ -1047,33 +1052,35 @@ The new behavior allows the following test case to work:: Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String(64)) + class UserKeyword(Base): - __tablename__ = 'user_keyword' - user_id = Column(Integer, ForeignKey('user.id'), primary_key=True) - keyword_id = Column(Integer, ForeignKey('keyword.id'), primary_key=True) + __tablename__ = "user_keyword" + user_id = Column(Integer, ForeignKey("user.id"), 
primary_key=True) + keyword_id = Column(Integer, ForeignKey("keyword.id"), primary_key=True) - user = relationship(User, - backref=backref("user_keywords", - cascade="all, delete-orphan") - ) + user = relationship( + User, backref=backref("user_keywords", cascade="all, delete-orphan") + ) - keyword = relationship("Keyword", - backref=backref("user_keywords", - cascade="all, delete-orphan") - ) + keyword = relationship( + "Keyword", backref=backref("user_keywords", cascade="all, delete-orphan") + ) # uncomment this to enable the old behavior # __mapper_args__ = {"legacy_is_orphan": True} + class Keyword(Base): - __tablename__ = 'keyword' + __tablename__ = "keyword" id = Column(Integer, primary_key=True) - keyword = Column('keyword', String(64)) + keyword = Column("keyword", String(64)) + from sqlalchemy import create_engine from sqlalchemy.orm import Session @@ -1103,7 +1110,6 @@ The new behavior allows the following test case to work:: session.commit() - :ticket:`2655` The after_attach event fires after the item is associated with the Session instead of before; before_attach added @@ -1129,9 +1135,9 @@ use cases should use the new "before_attach" event: @event.listens_for(Session, "before_attach") def before_attach(session, instance): - instance.some_necessary_attribute = session.query(Widget).\ - filter_by(instance.widget_name).\ - first() + instance.some_necessary_attribute = ( + session.query(Widget).filter_by(instance.widget_name).first() + ) :ticket:`2464` @@ -1146,11 +1152,13 @@ parent: :: - subq = session.query(Entity.value).\ - filter(Entity.id==Parent.entity_id).\ - correlate(Parent).\ - as_scalar() - session.query(Parent).filter(subq=="some value") + subq = ( + session.query(Entity.value) + .filter(Entity.id == Parent.entity_id) + .correlate(Parent) + .as_scalar() + ) + session.query(Parent).filter(subq == "some value") This was the opposite behavior of a plain ``select()`` construct which would assume auto-correlation by default. 
@@ -1158,10 +1166,8 @@ The above statement in 0.8 will correlate automatically: :: - subq = session.query(Entity.value).\ - filter(Entity.id==Parent.entity_id).\ - as_scalar() - session.query(Parent).filter(subq=="some value") + subq = session.query(Entity.value).filter(Entity.id == Parent.entity_id).as_scalar() + session.query(Parent).filter(subq == "some value") like in ``select()``, correlation can be disabled by calling ``query.correlate(None)`` or manually set by passing an @@ -1187,8 +1193,8 @@ objects relative to what's being selected:: from sqlalchemy.sql import table, column, select - t1 = table('t1', column('x')) - t2 = table('t2', column('y')) + t1 = table("t1", column("x")) + t2 = table("t2", column("y")) s = select([t1, t2]).correlate(t1) print(s) @@ -1263,8 +1269,8 @@ doing something like this: :: - scalar_subq = select([someothertable.c.id]).where(someothertable.c.data=='foo') - select([sometable]).where(sometable.c.id==scalar_subq) + scalar_subq = select([someothertable.c.id]).where(someothertable.c.data == "foo") + select([sometable]).where(sometable.c.id == scalar_subq) SQL Server doesn't allow an equality comparison to a scalar SELECT, that is, "x = (SELECT something)". 
The MSSQL dialect @@ -1313,32 +1319,28 @@ key would be ignored, inconsistently versus when :: # before 0.8 - table1 = Table('t1', metadata, - Column('col1', Integer, key='column_one') - ) + table1 = Table("t1", metadata, Column("col1", Integer, key="column_one")) s = select([table1]) - s.c.column_one # would be accessible like this - s.c.col1 # would raise AttributeError + s.c.column_one # would be accessible like this + s.c.col1 # would raise AttributeError s = select([table1]).apply_labels() - s.c.table1_column_one # would raise AttributeError - s.c.table1_col1 # would be accessible like this + s.c.table1_column_one # would raise AttributeError + s.c.table1_col1 # would be accessible like this In 0.8, :attr:`_schema.Column.key` is honored in both cases: :: # with 0.8 - table1 = Table('t1', metadata, - Column('col1', Integer, key='column_one') - ) + table1 = Table("t1", metadata, Column("col1", Integer, key="column_one")) s = select([table1]) - s.c.column_one # works - s.c.col1 # AttributeError + s.c.column_one # works + s.c.col1 # AttributeError s = select([table1]).apply_labels() - s.c.table1_column_one # works - s.c.table1_col1 # AttributeError + s.c.table1_column_one # works + s.c.table1_col1 # AttributeError All other behavior regarding "name" and "key" are the same, including that the rendered SQL will still use the form @@ -1408,8 +1410,8 @@ warning: :: - t1 = table('t1', column('x')) - t1.insert().values(x=5, z=5) # raises "Unconsumed column names: z" + t1 = table("t1", column("x")) + t1.insert().values(x=5, z=5) # raises "Unconsumed column names: z" :ticket:`2415` @@ -1439,7 +1441,7 @@ always compared case-insensitively: :: >>> row = result.fetchone() - >>> row['foo'] == row['FOO'] == row['Foo'] + >>> row["foo"] == row["FOO"] == row["Foo"] True This was for the benefit of a few dialects which in the diff --git a/doc/build/changelog/migration_09.rst b/doc/build/changelog/migration_09.rst index 70fa49e3439..2e45695abba 100644 --- 
a/doc/build/changelog/migration_09.rst +++ b/doc/build/changelog/migration_09.rst @@ -60,8 +60,7 @@ Using a :class:`_query.Query` in conjunction with a composite attribute now retu type maintained by that composite, rather than being broken out into individual columns. Using the mapping setup at :ref:`mapper_composite`:: - >>> session.query(Vertex.start, Vertex.end).\ - ... filter(Vertex.start == Point(3, 4)).all() + >>> session.query(Vertex.start, Vertex.end).filter(Vertex.start == Point(3, 4)).all() [(Point(x=3, y=4), Point(x=5, y=6))] This change is backwards-incompatible with code that expects the individual attribute @@ -69,8 +68,9 @@ to be expanded into individual columns. To get that behavior, use the ``.clause accessor:: - >>> session.query(Vertex.start.clauses, Vertex.end.clauses).\ - ... filter(Vertex.start == Point(3, 4)).all() + >>> session.query(Vertex.start.clauses, Vertex.end.clauses).filter( + ... Vertex.start == Point(3, 4) + ... ).all() [(3, 4, 5, 6)] .. seealso:: @@ -93,9 +93,11 @@ Consider the following example against the usual ``User`` mapping:: select_stmt = select([User]).where(User.id == 7).alias() - q = session.query(User).\ - join(select_stmt, User.id == select_stmt.c.id).\ - filter(User.name == 'ed') + q = ( + session.query(User) + .join(select_stmt, User.id == select_stmt.c.id) + .filter(User.name == "ed") + ) The above statement predictably renders SQL like the following:: @@ -109,10 +111,12 @@ If we wanted to reverse the order of the left and right elements of the JOIN, the documentation would lead us to believe we could use :meth:`_query.Query.select_from` to do so:: - q = session.query(User).\ - select_from(select_stmt).\ - join(User, User.id == select_stmt.c.id).\ - filter(User.name == 'ed') + q = ( + session.query(User) + .select_from(select_stmt) + .join(User, User.id == select_stmt.c.id) + .filter(User.name == "ed") + ) However, in version 0.8 and earlier, the above use of :meth:`_query.Query.select_from` would apply the 
``select_stmt`` to **replace** the ``User`` entity, as it @@ -137,7 +141,7 @@ to selecting from a customized :func:`.aliased` construct:: select_stmt = select([User]).where(User.id == 7) user_from_stmt = aliased(User, select_stmt.alias()) - q = session.query(user_from_stmt).filter(user_from_stmt.name == 'ed') + q = session.query(user_from_stmt).filter(user_from_stmt.name == "ed") So with SQLAlchemy 0.9, our query that selects from ``select_stmt`` produces the SQL we expect:: @@ -180,17 +184,20 @@ The change is illustrated as follows:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(Integer, ForeignKey('a.id')) + a_id = Column(Integer, ForeignKey("a.id")) a = relationship("A", backref=backref("bs", viewonly=True)) + e = create_engine("sqlite://") Base.metadata.create_all(e) @@ -229,16 +236,17 @@ the "association" row being present or not when the comparison is against Consider this mapping:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) - b_id = Column(Integer, ForeignKey('b.id'), primary_key=True) + b_id = Column(Integer, ForeignKey("b.id"), primary_key=True) b = relationship("B") b_value = association_proxy("b", "value") + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) value = Column(String) @@ -323,21 +331,24 @@ proxied value. 
E.g.:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) b = relationship("B", uselist=False) bname = association_proxy("b", "name") + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(Integer, ForeignKey('a.id')) + a_id = Column(Integer, ForeignKey("a.id")) name = Column(String) + a1 = A() # this is how m2o's always have worked @@ -370,17 +381,19 @@ This is a small change demonstrated as follows:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) data = Column(String) + e = create_engine("sqlite://", echo=True) Base.metadata.create_all(e) sess = Session(e) - a1 = A(data='a1') + a1 = A(data="a1") sess.add(a1) sess.commit() # a1 is now expired @@ -388,11 +401,23 @@ This is a small change demonstrated as follows:: assert inspect(a1).attrs.data.history == (None, None, None) # in 0.8, this would fail to load the unloaded state. - assert attributes.get_history(a1, 'data') == ((), ['a1',], ()) + assert attributes.get_history(a1, "data") == ( + (), + [ + "a1", + ], + (), + ) # load_history() is now equivalent to get_history() with # passive=PASSIVE_OFF ^ INIT_OK - assert inspect(a1).attrs.data.load_history() == ((), ['a1',], ()) + assert inspect(a1).attrs.data.load_history() == ( + (), + [ + "a1", + ], + (), + ) :ticket:`2787` @@ -452,14 +477,10 @@ use the :meth:`.TypeEngine.with_variant` method:: from sqlalchemy.dialects.mysql import INTEGER d = Date().with_variant( - DATE(storage_format="%(day)02d.%(month)02d.%(year)04d"), - "sqlite" - ) + DATE(storage_format="%(day)02d.%(month)02d.%(year)04d"), "sqlite" + ) - i = Integer().with_variant( - INTEGER(display_width=5), - "mysql" - ) + i = Integer().with_variant(INTEGER(display_width=5), "mysql") :meth:`.TypeEngine.with_variant` isn't new, it was added in SQLAlchemy 0.7.2. 
So code that is running on the 0.8 series can be corrected to use @@ -549,7 +570,7 @@ The precedence rules for COLLATE have been changed Previously, an expression like the following:: - print((column('x') == 'somevalue').collate("en_EN")) + print((column("x") == "somevalue").collate("en_EN")) would produce an expression like this:: @@ -567,7 +588,7 @@ The potentially backwards incompatible change arises if the :meth:`.ColumnOperators.collate` operator is being applied to the right-hand column, as follows:: - print(column('x') == literal('somevalue').collate("en_EN")) + print(column("x") == literal("somevalue").collate("en_EN")) In 0.8, this produces:: @@ -584,11 +605,11 @@ The :meth:`.ColumnOperators.collate` operator now works more appropriately withi generated:: >>> # 0.8 - >>> print(column('x').collate('en_EN').desc()) + >>> print(column("x").collate("en_EN").desc()) (x COLLATE en_EN) DESC >>> # 0.9 - >>> print(column('x').collate('en_EN').desc()) + >>> print(column("x").collate("en_EN").desc()) x COLLATE en_EN DESC :ticket:`2879` @@ -604,7 +625,7 @@ The :class:`_postgresql.ENUM` type will now apply escaping to single quote signs within the enumerated values:: >>> from sqlalchemy.dialects import postgresql - >>> type = postgresql.ENUM('one', 'two', "three's", name="myenum") + >>> type = postgresql.ENUM("one", "two", "three's", name="myenum") >>> from sqlalchemy.dialects.postgresql import base >>> print(base.CreateEnumType(type).compile(dialect=postgresql.dialect())) CREATE TYPE myenum AS ENUM ('one','two','three''s') @@ -633,6 +654,7 @@ from all locations in which it had been established:: """listen for before_insert""" # ... + event.remove(MyClass, "before_insert", my_before_insert) In the example above, the ``propagate=True`` flag is set. 
This @@ -689,13 +711,9 @@ Setting an option on path that is based on a subclass requires that all links in the path be spelled out as class bound attributes, since the :meth:`.PropComparator.of_type` method needs to be called:: - session.query(Company).\ - options( - subqueryload_all( - Company.employees.of_type(Engineer), - Engineer.machines - ) - ) + session.query(Company).options( + subqueryload_all(Company.employees.of_type(Engineer), Engineer.machines) + ) **New Way** @@ -726,7 +744,6 @@ but the intent is clearer:: query(User).options(defaultload("orders").defaultload("items").subqueryload("keywords")) - The dotted style can still be taken advantage of, particularly in the case of skipping over several path elements:: @@ -791,7 +808,6 @@ others:: # undefer all Address columns query(User).options(defaultload(User.addresses).undefer("*")) - :ticket:`1418` @@ -826,7 +842,8 @@ The :func:`_expression.text` construct gains new methods: stmt = stmt.alias() stmt = select([addresses]).select_from( - addresses.join(stmt), addresses.c.user_id == stmt.c.id) + addresses.join(stmt), addresses.c.user_id == stmt.c.id + ) # or into a cte(): @@ -834,7 +851,8 @@ The :func:`_expression.text` construct gains new methods: stmt = stmt.cte("x") stmt = select([addresses]).select_from( - addresses.join(stmt), addresses.c.user_id == stmt.c.id) + addresses.join(stmt), addresses.c.user_id == stmt.c.id + ) :ticket:`2877` @@ -850,9 +868,9 @@ compatible construct can be passed to the new method :meth:`_expression.Insert.f where it will be used to render an ``INSERT .. 
SELECT`` construct:: >>> from sqlalchemy.sql import table, column - >>> t1 = table('t1', column('a'), column('b')) - >>> t2 = table('t2', column('x'), column('y')) - >>> print(t1.insert().from_select(['a', 'b'], t2.select().where(t2.c.y == 5))) + >>> t1 = table("t1", column("a"), column("b")) + >>> t2 = table("t2", column("x"), column("y")) + >>> print(t1.insert().from_select(["a", "b"], t2.select().where(t2.c.y == 5))) INSERT INTO t1 (a, b) SELECT t2.x, t2.y FROM t2 WHERE t2.y = :y_1 @@ -861,7 +879,7 @@ The construct is smart enough to also accommodate ORM objects such as classes and :class:`_query.Query` objects:: s = Session() - q = s.query(User.id, User.name).filter_by(name='ed') + q = s.query(User.id, User.name).filter_by(name="ed") ins = insert(Address).from_select((Address.id, Address.email_address), q) rendering:: @@ -920,9 +938,10 @@ for ``.decimal_return_scale`` if it is not otherwise specified. If both from sqlalchemy.dialects.mysql import DOUBLE import decimal - data = Table('data', metadata, - Column('double_value', - mysql.DOUBLE(decimal_return_scale=12, asdecimal=True)) + data = Table( + "data", + metadata, + Column("double_value", mysql.DOUBLE(decimal_return_scale=12, asdecimal=True)), ) conn.execute( @@ -938,7 +957,6 @@ for ``.decimal_return_scale`` if it is not otherwise specified. 
If both # much precision for DOUBLE assert result == decimal.Decimal("45.768392065789") - :ticket:`2867` @@ -1004,8 +1022,9 @@ from a backref:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B", backref="a") @@ -1015,21 +1034,22 @@ from a backref:: print("A.bs validator") return item + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(Integer, ForeignKey('a.id')) + a_id = Column(Integer, ForeignKey("a.id")) @validates("a", include_backrefs=False) def validate_a(self, key, item): print("B.a validator") return item + a1 = A() a1.bs.append(B()) # prints only "A.bs validator" - :ticket:`1535` @@ -1262,14 +1282,9 @@ without any subqueries generated:: employee_alias = with_polymorphic(Person, [Engineer, Manager], flat=True) - session.query(Company).join( - Company.employees.of_type(employee_alias) - ).filter( - or_( - Engineer.primary_language == 'python', - Manager.manager_name == 'dilbert' - ) - ) + session.query(Company).join(Company.employees.of_type(employee_alias)).filter( + or_(Engineer.primary_language == "python", Manager.manager_name == "dilbert") + ) Generates (everywhere except SQLite):: @@ -1295,7 +1310,9 @@ on the right side. 
Normally, a joined eager load chain like the following:: - query(User).options(joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True)) + query(User).options( + joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True) + ) Would not produce an inner join; because of the LEFT OUTER JOIN from user->order, joined eager loading could not use an INNER join from order->items without changing @@ -1311,7 +1328,9 @@ the new "right-nested joins are OK" logic would kick in, and we'd get:: Since we missed the boat on that, to avoid further regressions we've added the above functionality by specifying the string ``"nested"`` to :paramref:`_orm.joinedload.innerjoin`:: - query(User).options(joinedload("orders", innerjoin=False).joinedload("items", innerjoin="nested")) + query(User).options( + joinedload("orders", innerjoin=False).joinedload("items", innerjoin="nested") + ) This feature is new in 0.9.4. @@ -1406,16 +1425,18 @@ replacement operation, which in turn should cause the item to be removed from a previous collection:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) children = relationship("Child", backref="parent") + class Child(Base): - __tablename__ = 'child' + __tablename__ = "child" id = Column(Integer, primary_key=True) - parent_id = Column(ForeignKey('parent.id')) + parent_id = Column(ForeignKey("parent.id")) + p1 = Parent() p2 = Parent() @@ -1520,7 +1541,7 @@ Starting with a table such as this:: from sqlalchemy import Table, Boolean, Integer, Column, MetaData - t1 = Table('t', MetaData(), Column('x', Boolean()), Column('y', Integer)) + t1 = Table("t", MetaData(), Column("x", Boolean()), Column("y", Integer)) A select construct will now render the boolean column as a binary expression on backends that don't feature ``true``/``false`` constant behavior:: @@ -1535,8 +1556,9 @@ The :func:`.and_` and :func:`.or_` constructs will now exhibit quasi "short circuit" behavior, 
that is truncating a rendered expression, when a :func:`.true` or :func:`.false` constant is present:: - >>> print(select([t1]).where(and_(t1.c.y > 5, false())).compile( - ... dialect=postgresql.dialect())) + >>> print( + ... select([t1]).where(and_(t1.c.y > 5, false())).compile(dialect=postgresql.dialect()) + ... ) SELECT t.x, t.y FROM t WHERE false :func:`.true` can be used as the base to build up an expression:: @@ -1549,8 +1571,7 @@ The :func:`.and_` and :func:`.or_` constructs will now exhibit quasi The boolean constants :func:`.true` and :func:`.false` themselves render as ``0 = 1`` and ``1 = 1`` for a backend with no boolean constants:: - >>> print(select([t1]).where(and_(t1.c.y > 5, false())).compile( - ... dialect=mysql.dialect())) + >>> print(select([t1]).where(and_(t1.c.y > 5, false())).compile(dialect=mysql.dialect())) SELECT t.x, t.y FROM t WHERE 0 = 1 Interpretation of ``None``, while not particularly valid SQL, is at least @@ -1581,7 +1602,7 @@ E.g. an example like:: from sqlalchemy.sql import table, column, select, func - t = table('t', column('c1'), column('c2')) + t = table("t", column("c1"), column("c2")) expr = (func.foo(t.c.c1) + t.c.c2).label("expr") stmt = select([expr]).order_by(expr) @@ -1620,16 +1641,16 @@ The ``__eq__()`` method now compares both sides as a tuple and also an ``__lt__()`` method has been added:: users.insert().execute( - dict(user_id=1, user_name='foo'), - dict(user_id=2, user_name='bar'), - dict(user_id=3, user_name='def'), - ) + dict(user_id=1, user_name="foo"), + dict(user_id=2, user_name="bar"), + dict(user_id=3, user_name="def"), + ) rows = users.select().order_by(users.c.user_name).execute().fetchall() - eq_(rows, [(2, 'bar'), (3, 'def'), (1, 'foo')]) + eq_(rows, [(2, "bar"), (3, "def"), (1, "foo")]) - eq_(sorted(rows), [(1, 'foo'), (2, 'bar'), (3, 'def')]) + eq_(sorted(rows), [(1, "foo"), (2, "bar"), (3, "def")]) :ticket:`2848` @@ -1667,7 +1688,7 @@ Above, ``bp`` remains unchanged, but the ``String`` type will be 
used when the statement is executed, which we can see by examining the ``binds`` dictionary:: >>> compiled = stmt.compile() - >>> compiled.binds['some_col'].type + >>> compiled.binds["some_col"].type String The feature allows custom types to take their expected effect within INSERT/UPDATE @@ -1727,10 +1748,10 @@ Scenarios which now work correctly include: >>> from sqlalchemy import Table, MetaData, Column, Integer, ForeignKey >>> metadata = MetaData() - >>> t2 = Table('t2', metadata, Column('t1id', ForeignKey('t1.id'))) + >>> t2 = Table("t2", metadata, Column("t1id", ForeignKey("t1.id"))) >>> t2.c.t1id.type NullType() - >>> t1 = Table('t1', metadata, Column('id', Integer, primary_key=True)) + >>> t1 = Table("t1", metadata, Column("id", Integer, primary_key=True)) >>> t2.c.t1id.type Integer() @@ -1738,16 +1759,23 @@ Scenarios which now work correctly include: >>> from sqlalchemy import Table, MetaData, Column, Integer, ForeignKeyConstraint >>> metadata = MetaData() - >>> t2 = Table('t2', metadata, - ... Column('t1a'), Column('t1b'), - ... ForeignKeyConstraint(['t1a', 't1b'], ['t1.a', 't1.b'])) + >>> t2 = Table( + ... "t2", + ... metadata, + ... Column("t1a"), + ... Column("t1b"), + ... ForeignKeyConstraint(["t1a", "t1b"], ["t1.a", "t1.b"]), + ... ) >>> t2.c.t1a.type NullType() >>> t2.c.t1b.type NullType() - >>> t1 = Table('t1', metadata, - ... Column('a', Integer, primary_key=True), - ... Column('b', Integer, primary_key=True)) + >>> t1 = Table( + ... "t1", + ... metadata, + ... Column("a", Integer, primary_key=True), + ... Column("b", Integer, primary_key=True), + ... 
) >>> t2.c.t1a.type Integer() >>> t2.c.t1b.type @@ -1758,13 +1786,13 @@ Scenarios which now work correctly include: >>> from sqlalchemy import Table, MetaData, Column, Integer, ForeignKey >>> metadata = MetaData() - >>> t2 = Table('t2', metadata, Column('t1id', ForeignKey('t1.id'))) - >>> t3 = Table('t3', metadata, Column('t2t1id', ForeignKey('t2.t1id'))) + >>> t2 = Table("t2", metadata, Column("t1id", ForeignKey("t1.id"))) + >>> t3 = Table("t3", metadata, Column("t2t1id", ForeignKey("t2.t1id"))) >>> t2.c.t1id.type NullType() >>> t3.c.t2t1id.type NullType() - >>> t1 = Table('t1', metadata, Column('id', Integer, primary_key=True)) + >>> t1 = Table("t1", metadata, Column("id", Integer, primary_key=True)) >>> t2.c.t1id.type Integer() >>> t3.c.t2t1id.type diff --git a/doc/build/changelog/migration_10.rst b/doc/build/changelog/migration_10.rst index 68fb0bd7773..2ff86415015 100644 --- a/doc/build/changelog/migration_10.rst +++ b/doc/build/changelog/migration_10.rst @@ -71,15 +71,16 @@ once, a query as a pre-compiled unit begins to be feasible:: bakery = baked.bakery() + def search_for_user(session, username, email=None): baked_query = bakery(lambda session: session.query(User)) - baked_query += lambda q: q.filter(User.name == bindparam('username')) + baked_query += lambda q: q.filter(User.name == bindparam("username")) baked_query += lambda q: q.order_by(User.id) if email: - baked_query += lambda q: q.filter(User.email == bindparam('email')) + baked_query += lambda q: q.filter(User.email == bindparam("email")) result = baked_query(session).params(username=username, email=email).all() @@ -109,10 +110,11 @@ call upon mixin-established columns and will receive a reference to the correct @declared_attr def foobar_prop(cls): - return column_property('foobar: ' + cls.foobar) + return column_property("foobar: " + cls.foobar) + class SomeClass(HasFooBar, Base): - __tablename__ = 'some_table' + __tablename__ = "some_table" id = Column(Integer, primary_key=True) Above, 
``SomeClass.foobar_prop`` will be invoked against ``SomeClass``, @@ -132,10 +134,11 @@ this:: @declared_attr def foobar_prop(cls): - return column_property('foobar: ' + cls.foobar) + return column_property("foobar: " + cls.foobar) + class SomeClass(HasFooBar, Base): - __tablename__ = 'some_table' + __tablename__ = "some_table" id = Column(Integer, primary_key=True) Previously, ``SomeClass`` would be mapped with one particular copy of @@ -167,16 +170,19 @@ applied:: @declared_attr.cascading def id(cls): if has_inherited_table(cls): - return Column(ForeignKey('myclass.id'), primary_key=True) + return Column(ForeignKey("myclass.id"), primary_key=True) else: return Column(Integer, primary_key=True) + class MyClass(HasIdMixin, Base): - __tablename__ = 'myclass' + __tablename__ = "myclass" # ... + class MySubClass(MyClass): - "" + """""" + # ... .. seealso:: @@ -189,13 +195,17 @@ on the abstract base:: from sqlalchemy import Column, Integer, ForeignKey from sqlalchemy.orm import relationship - from sqlalchemy.ext.declarative import (declarative_base, declared_attr, - AbstractConcreteBase) + from sqlalchemy.ext.declarative import ( + declarative_base, + declared_attr, + AbstractConcreteBase, + ) Base = declarative_base() + class Something(Base): - __tablename__ = u'something' + __tablename__ = "something" id = Column(Integer, primary_key=True) @@ -212,9 +222,8 @@ on the abstract base:: class Concrete(Abstract): - __tablename__ = u'cca' - __mapper_args__ = {'polymorphic_identity': 'cca', 'concrete': True} - + __tablename__ = "cca" + __mapper_args__ = {"polymorphic_identity": "cca", "concrete": True} The above mapping will set up a table ``cca`` with both an ``id`` and a ``something_id`` column, and ``Concrete`` will also have a relationship @@ -240,17 +249,19 @@ of load that's improved the most:: Base = declarative_base() + class Foo(Base): __table__ = Table( - 'foo', Base.metadata, - Column('id', Integer, primary_key=True), - Column('a', Integer(), nullable=False), - 
Column('b', Integer(), nullable=False), - Column('c', Integer(), nullable=False), + "foo", + Base.metadata, + Column("id", Integer, primary_key=True), + Column("a", Integer(), nullable=False), + Column("b", Integer(), nullable=False), + Column("c", Integer(), nullable=False), ) - engine = create_engine( - 'mysql+mysqldb://scott:tiger@localhost/test', echo=True) + + engine = create_engine("mysql+mysqldb://scott:tiger@localhost/test", echo=True) sess = Session(engine) @@ -385,32 +396,29 @@ of inheritance-oriented scenarios, including: * Binding to a Mixin or Abstract Class:: class MyClass(SomeMixin, Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" # ... - session = Session(binds={SomeMixin: some_engine}) + session = Session(binds={SomeMixin: some_engine}) * Binding to inherited concrete subclasses individually based on table:: class BaseClass(Base): - __tablename__ = 'base' + __tablename__ = "base" # ... + class ConcreteSubClass(BaseClass): - __tablename__ = 'concrete' + __tablename__ = "concrete" # ... - __mapper_args__ = {'concrete': True} - + __mapper_args__ = {"concrete": True} - session = Session(binds={ - base_table: some_engine, - concrete_table: some_other_engine - }) + session = Session(binds={base_table: some_engine, concrete_table: some_other_engine}) :ticket:`3035` @@ -446,10 +454,10 @@ These scenarios include: statement as well as for the SELECT used by the "fetch" strategy:: session.query(User).filter(User.id == 15).update( - {"name": "foob"}, synchronize_session='fetch') + {"name": "foob"}, synchronize_session="fetch" + ) - session.query(User).filter(User.id == 15).delete( - synchronize_session='fetch') + session.query(User).filter(User.id == 15).delete(synchronize_session="fetch") * Queries against individual columns:: @@ -488,7 +496,7 @@ at the attribute. 
Below this is illustrated using the return self.value + 5 - inspect(SomeObject).all_orm_descriptors.some_prop.info['foo'] = 'bar' + inspect(SomeObject).all_orm_descriptors.some_prop.info["foo"] = "bar" It is also available as a constructor argument for all :class:`.SchemaItem` objects (e.g. :class:`_schema.ForeignKey`, :class:`.UniqueConstraint` etc.) as well @@ -510,20 +518,19 @@ as the "order by label" logic introduced in 0.9 (see :ref:`migration_1068`). Given a mapping like the following:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) - A.b = column_property( - select([func.max(B.id)]).where(B.a_id == A.id).correlate(A) - ) + A.b = column_property(select([func.max(B.id)]).where(B.a_id == A.id).correlate(A)) A simple scenario that included "A.b" twice would fail to render correctly:: @@ -550,12 +557,12 @@ There were also many scenarios where the "order by" logic would fail to order by label, for example if the mapping were "polymorphic":: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) type = Column(String) - __mapper_args__ = {'polymorphic_on': type, 'with_polymorphic': '*'} + __mapper_args__ = {"polymorphic_on": type, "with_polymorphic": "*"} The order_by would fail to use the label, as it would be anonymized due to the polymorphic loading:: @@ -592,7 +599,7 @@ any SQL expression, in addition to integer values, as arguments. 
The ORM this is used to allow a bound parameter to be passed, which can be substituted with a value later:: - sel = select([table]).limit(bindparam('mylimit')).offset(bindparam('myoffset')) + sel = select([table]).limit(bindparam("mylimit")).offset(bindparam("myoffset")) Dialects which don't support non-integer LIMIT or OFFSET expressions may continue to not support this behavior; third party dialects may also need modification @@ -702,12 +709,12 @@ CHECK Constraints now support the ``%(column_0_name)s`` token in naming conventi The ``%(column_0_name)s`` will derive from the first column found in the expression of a :class:`.CheckConstraint`:: - metadata = MetaData( - naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"} - ) + metadata = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}) - foo = Table('foo', metadata, - Column('value', Integer), + foo = Table( + "foo", + metadata, + Column("value", Integer), ) CheckConstraint(foo.c.value > 5) @@ -743,10 +750,7 @@ Since at least version 0.8, a :class:`.Constraint` has had the ability to m = MetaData() - t = Table('t', m, - Column('a', Integer), - Column('b', Integer) - ) + t = Table("t", m, Column("a", Integer), Column("b", Integer)) uq = UniqueConstraint(t.c.a, t.c.b) # will auto-attach to Table @@ -762,12 +766,12 @@ the :class:`.Constraint` is also added:: m = MetaData() - a = Column('a', Integer) - b = Column('b', Integer) + a = Column("a", Integer) + b = Column("b", Integer) uq = UniqueConstraint(a, b) - t = Table('t', m, a, b) + t = Table("t", m, a, b) assert uq in t.constraints # constraint auto-attached @@ -781,12 +785,12 @@ tracking for the addition of names to a :class:`_schema.Table`:: m = MetaData() - a = Column('a', Integer) - b = Column('b', Integer) + a = Column("a", Integer) + b = Column("b", Integer) - uq = UniqueConstraint(a, 'b') + uq = UniqueConstraint(a, "b") - t = Table('t', m, a, b) + t = Table("t", m, a, b) # constraint *not* auto-attached, as we do not have 
tracking # to locate when a name 'b' becomes available on the table @@ -806,18 +810,17 @@ the :class:`.Constraint` is constructed:: m = MetaData() - a = Column('a', Integer) - b = Column('b', Integer) + a = Column("a", Integer) + b = Column("b", Integer) - t = Table('t', m, a, b) + t = Table("t", m, a, b) - uq = UniqueConstraint(a, 'b') + uq = UniqueConstraint(a, "b") # constraint auto-attached normally as in older versions assert uq in t.constraints - :ticket:`3341` :ticket:`3411` @@ -838,12 +841,11 @@ expressions are rendered as constants into the SELECT statement:: m = MetaData() t = Table( - 't', m, - Column('x', Integer), - Column('y', Integer, default=func.somefunction())) + "t", m, Column("x", Integer), Column("y", Integer, default=func.somefunction()) + ) stmt = select([t.c.x]) - print(t.insert().from_select(['x'], stmt)) + print(t.insert().from_select(["x"], stmt)) Will render:: @@ -870,9 +872,10 @@ embedded in SQL to render correctly, such as:: metadata = MetaData() - tbl = Table("derp", metadata, - Column("arr", ARRAY(Text), - server_default=array(["foo", "bar", "baz"])), + tbl = Table( + "derp", + metadata, + Column("arr", ARRAY(Text), server_default=array(["foo", "bar", "baz"])), ) print(CreateTable(tbl).compile(dialect=postgresql.dialect())) @@ -981,8 +984,9 @@ emitted for ten of the parameter sets, out of a total of 1000:: warnings.filterwarnings("once") for i in range(1000): - e.execute(select([cast( - ('foo_%d' % random.randint(0, 1000000)).encode('ascii'), Unicode)])) + e.execute( + select([cast(("foo_%d" % random.randint(0, 1000000)).encode("ascii"), Unicode)]) + ) The format of the warning here is:: @@ -1015,40 +1019,41 @@ onto the class. The string names are now resolved as attribute names in earnest:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) - name = Column('user_name', String(50)) + name = Column("user_name", String(50)) Above, the column ``user_name`` is mapped as ``name``. 
Previously, a call to :meth:`_query.Query.update` that was passed strings would have to have been called as follows:: - session.query(User).update({'user_name': 'moonbeam'}) + session.query(User).update({"user_name": "moonbeam"}) The given string is now resolved against the entity:: - session.query(User).update({'name': 'moonbeam'}) + session.query(User).update({"name": "moonbeam"}) It is typically preferable to use the attribute directly, to avoid any ambiguity:: - session.query(User).update({User.name: 'moonbeam'}) + session.query(User).update({User.name: "moonbeam"}) The change also indicates that synonyms and hybrid attributes can be referred to by string name as well:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) - name = Column('user_name', String(50)) + name = Column("user_name", String(50)) @hybrid_property def fullname(self): return self.name - session.query(User).update({'fullname': 'moonbeam'}) + + session.query(User).update({"fullname": "moonbeam"}) :ticket:`3228` @@ -1108,13 +1113,14 @@ it only became apparent as a result of :ticket:`3371`. Given a mapping:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) a = relationship("A") Given ``A``, with primary key of 7, but which we changed to be 10 @@ -1254,15 +1260,16 @@ attributes, a change in behavior can be seen here when assigning None. 
Given a mapping:: class A(Base): - __tablename__ = 'table_a' + __tablename__ = "table_a" id = Column(Integer, primary_key=True) + class B(Base): - __tablename__ = 'table_b' + __tablename__ = "table_b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('table_a.id')) + a_id = Column(ForeignKey("table_a.id")) a = relationship(A) In 1.0, the relationship-bound attribute takes precedence over the FK-bound @@ -1277,7 +1284,7 @@ only takes effect if a value is assigned; the None is not considered:: session.flush() b1 = B() - b1.a = a1 # we expect a_id to be '1'; takes precedence in 0.9 and 1.0 + b1.a = a1 # we expect a_id to be '1'; takes precedence in 0.9 and 1.0 b2 = B() b2.a = None # we expect a_id to be None; takes precedence only in 1.0 @@ -1339,7 +1346,7 @@ with yield-per (subquery loading could be in theory, however). When this error is raised, the :func:`.lazyload` option can be sent with an asterisk:: - q = sess.query(Object).options(lazyload('*')).yield_per(100) + q = sess.query(Object).options(lazyload("*")).yield_per(100) or use :meth:`_query.Query.enable_eagerloads`:: @@ -1348,8 +1355,11 @@ or use :meth:`_query.Query.enable_eagerloads`:: The :func:`.lazyload` option has the advantage that additional many-to-one joined loader options can still be used:: - q = sess.query(Object).options( - lazyload('*'), joinedload("some_manytoone")).yield_per(100) + q = ( + sess.query(Object) + .options(lazyload("*"), joinedload("some_manytoone")) + .yield_per(100) + ) .. 
_bug_3233: @@ -1370,15 +1380,17 @@ Starting with a mapping as:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B") + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) A query that joins to ``A.bs`` twice:: @@ -1392,9 +1404,9 @@ Will render:: The query deduplicates the redundant ``A.bs`` because it is attempting to support a case like the following:: - s.query(A).join(A.bs).\ - filter(B.foo == 'bar').\ - reset_joinpoint().join(A.bs, B.cs).filter(C.bar == 'bat') + s.query(A).join(A.bs).filter(B.foo == "bar").reset_joinpoint().join(A.bs, B.cs).filter( + C.bar == "bat" + ) That is, the ``A.bs`` is part of a "path". As part of :ticket:`3367`, arriving at the same endpoint twice without it being part of a @@ -1437,31 +1449,33 @@ a mapping as follows:: Base = declarative_base() + class A(Base): __tablename__ = "a" id = Column(Integer, primary_key=True) type = Column(String) - __mapper_args__ = {'polymorphic_on': type, 'polymorphic_identity': 'a'} + __mapper_args__ = {"polymorphic_on": type, "polymorphic_identity": "a"} class ASub1(A): - __mapper_args__ = {'polymorphic_identity': 'asub1'} + __mapper_args__ = {"polymorphic_identity": "asub1"} class ASub2(A): - __mapper_args__ = {'polymorphic_identity': 'asub2'} + __mapper_args__ = {"polymorphic_identity": "asub2"} class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) a_id = Column(Integer, ForeignKey("a.id")) - a = relationship("A", primaryjoin="B.a_id == A.id", backref='b') + a = relationship("A", primaryjoin="B.a_id == A.id", backref="b") + s = Session() @@ -1543,26 +1557,28 @@ Previously, the sample code looked like:: from sqlalchemy.orm import Bundle + class DictBundle(Bundle): def create_row_processor(self, query, procs, labels): """Override create_row_processor 
to return values as dictionaries""" + def proc(row, result): - return dict( - zip(labels, (proc(row, result) for proc in procs)) - ) + return dict(zip(labels, (proc(row, result) for proc in procs))) + return proc The unused ``result`` member is now removed:: from sqlalchemy.orm import Bundle + class DictBundle(Bundle): def create_row_processor(self, query, procs, labels): """Override create_row_processor to return values as dictionaries""" + def proc(row): - return dict( - zip(labels, (proc(row) for proc in procs)) - ) + return dict(zip(labels, (proc(row) for proc in procs))) + return proc .. seealso:: @@ -1587,7 +1603,8 @@ join eager load will use a right-nested join. ``"nested"`` is now implied when using ``innerjoin=True``:: query(User).options( - joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True)) + joinedload("orders", innerjoin=False).joinedload("items", innerjoin=True) + ) With the new default, this will render the FROM clause in the form:: @@ -1601,7 +1618,8 @@ optimization parameter to take effect in all cases. 
To get the older behavior, use ``innerjoin="unnested"``:: query(User).options( - joinedload("orders", innerjoin=False).joinedload("items", innerjoin="unnested")) + joinedload("orders", innerjoin=False).joinedload("items", innerjoin="unnested") + ) This will avoid right-nested joins and chain the joins together using all OUTER joins despite the innerjoin directive:: @@ -1626,15 +1644,16 @@ Subqueries no longer applied to uselist=False joined eager loads Given a joined eager load like the following:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) b = relationship("B", uselist=False) class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) + s = Session() print(s.query(A).options(joinedload(A.b)).limit(5)) @@ -1709,7 +1728,8 @@ Change to single-table-inheritance criteria when using from_self(), count() Given a single-table inheritance mapping, such as:: class Widget(Base): - __table__ = 'widget_table' + __table__ = "widget_table" + class FooWidget(Widget): pass @@ -1769,20 +1789,20 @@ the "single table criteria" when joining on a relationship. 
Given a mapping as:: class Widget(Base): - __tablename__ = 'widget' + __tablename__ = "widget" id = Column(Integer, primary_key=True) type = Column(String) - related_id = Column(ForeignKey('related.id')) + related_id = Column(ForeignKey("related.id")) related = relationship("Related", backref="widget") - __mapper_args__ = {'polymorphic_on': type} + __mapper_args__ = {"polymorphic_on": type} class FooWidget(Widget): - __mapper_args__ = {'polymorphic_identity': 'foo'} + __mapper_args__ = {"polymorphic_identity": "foo"} class Related(Base): - __tablename__ = 'related' + __tablename__ = "related" id = Column(Integer, primary_key=True) It's been the behavior for quite some time that a JOIN on the relationship @@ -1850,7 +1870,7 @@ behavior of passing string values that become parameterized:: # This is a normal Core expression with a string argument - # we aren't talking about this!! - stmt = select([sometable]).where(sometable.c.somecolumn == 'value') + stmt = select([sometable]).where(sometable.c.somecolumn == "value") The Core tutorial has long featured an example of the use of this technique, using a :func:`_expression.select` construct where virtually all components of it @@ -1893,24 +1913,28 @@ one wishes the warnings to be exceptions, the should be used:: import warnings - warnings.simplefilter("error") # all warnings raise an exception + + warnings.simplefilter("error") # all warnings raise an exception Given the above warnings, our statement works just fine, but to get rid of the warnings we would rewrite our statement as follows:: from sqlalchemy import select, text - stmt = select([ - text("a"), - text("b") - ]).where(text("a = b")).select_from(text("sometable")) + + stmt = ( + select([text("a"), text("b")]).where(text("a = b")).select_from(text("sometable")) + ) and as the warnings suggest, we can give our statement more specificity about the text if we use :func:`_expression.column` and :func:`.table`:: from sqlalchemy import select, text, column, table - 
stmt = select([column("a"), column("b")]).\ - where(text("a = b")).select_from(table("sometable")) + stmt = ( + select([column("a"), column("b")]) + .where(text("a = b")) + .select_from(table("sometable")) + ) Where note also that :func:`.table` and :func:`_expression.column` can now be imported from "sqlalchemy" without the "sql" part. @@ -1927,10 +1951,11 @@ of this change we have enhanced its functionality. When we have a :func:`_expression.select` or :class:`_query.Query` that refers to some column name or named label, we might want to GROUP BY and/or ORDER BY known columns or labels:: - stmt = select([ - user.c.name, - func.count(user.c.id).label("id_count") - ]).group_by("name").order_by("id_count") + stmt = ( + select([user.c.name, func.count(user.c.id).label("id_count")]) + .group_by("name") + .order_by("id_count") + ) In the above statement we expect to see "ORDER BY id_count", as opposed to a re-statement of the function. The string argument given is actively @@ -1944,10 +1969,9 @@ the ``"name"`` expression has been resolved to ``users.name``!):: However, if we refer to a name that cannot be located, then we get the warning again, as below:: - stmt = select([ - user.c.name, - func.count(user.c.id).label("id_count") - ]).order_by("some_label") + stmt = select([user.c.name, func.count(user.c.id).label("id_count")]).order_by( + "some_label" + ) The output does what we say, but again it warns us:: @@ -1995,16 +2019,21 @@ that of an "executemany" style of invocation:: counter = itertools.count(1) t = Table( - 'my_table', metadata, - Column('id', Integer, default=lambda: next(counter)), - Column('data', String) + "my_table", + metadata, + Column("id", Integer, default=lambda: next(counter)), + Column("data", String), ) - conn.execute(t.insert().values([ - {"data": "d1"}, - {"data": "d2"}, - {"data": "d3"}, - ])) + conn.execute( + t.insert().values( + [ + {"data": "d1"}, + {"data": "d2"}, + {"data": "d3"}, + ] + ) + ) The above example will invoke 
``next(counter)`` for each row individually as would be expected:: @@ -2034,16 +2063,21 @@ value is required; if an omitted value only refers to a server-side default, an exception is raised:: t = Table( - 'my_table', metadata, - Column('id', Integer, primary_key=True), - Column('data', String, server_default='some default') + "my_table", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String, server_default="some default"), ) - conn.execute(t.insert().values([ - {"data": "d1"}, - {"data": "d2"}, - {}, - ])) + conn.execute( + t.insert().values( + [ + {"data": "d1"}, + {"data": "d2"}, + {}, + ] + ) + ) will raise:: @@ -2109,7 +2143,7 @@ data is needed. A :class:`_schema.Table` can be set up for reflection by passing :paramref:`_schema.Table.autoload_with` alone:: - my_table = Table('my_table', metadata, autoload_with=some_engine) + my_table = Table("my_table", metadata, autoload_with=some_engine) :ticket:`3027` @@ -2224,8 +2258,8 @@ An :class:`_postgresql.ENUM` that is created **without** being explicitly associated with a :class:`_schema.MetaData` object will be created *and* dropped corresponding to :meth:`_schema.Table.create` and :meth:`_schema.Table.drop`:: - table = Table('sometable', metadata, - Column('some_enum', ENUM('a', 'b', 'c', name='myenum')) + table = Table( + "sometable", metadata, Column("some_enum", ENUM("a", "b", "c", name="myenum")) ) table.create(engine) # will emit CREATE TYPE and CREATE TABLE @@ -2242,11 +2276,9 @@ corresponding to :meth:`_schema.Table.create` and :meth:`_schema.Table.drop`, wi the exception of :meth:`_schema.Table.create` called with the ``checkfirst=True`` flag:: - my_enum = ENUM('a', 'b', 'c', name='myenum', metadata=metadata) + my_enum = ENUM("a", "b", "c", name="myenum", metadata=metadata) - table = Table('sometable', metadata, - Column('some_enum', my_enum) - ) + table = Table("sometable", metadata, Column("some_enum", my_enum)) # will fail: ENUM 'my_enum' does not exist table.create(engine) @@ 
-2256,10 +2288,9 @@ flag:: table.drop(engine) # will emit DROP TABLE, *not* DROP TYPE - metadata.drop_all(engine) # will emit DROP TYPE - - metadata.create_all(engine) # will emit CREATE TYPE + metadata.drop_all(engine) # will emit DROP TYPE + metadata.create_all(engine) # will emit CREATE TYPE :ticket:`3319` @@ -2334,13 +2365,14 @@ so that code like the following may proceed:: metadata = MetaData() user_tmp = Table( - "user_tmp", metadata, + "user_tmp", + metadata, Column("id", INT, primary_key=True), - Column('name', VARCHAR(50)), - prefixes=['TEMPORARY'] + Column("name", VARCHAR(50)), + prefixes=["TEMPORARY"], ) - e = create_engine("postgresql://scott:tiger@localhost/test", echo='debug') + e = create_engine("postgresql://scott:tiger@localhost/test", echo="debug") with e.begin() as conn: user_tmp.create(conn, checkfirst=True) @@ -2357,21 +2389,23 @@ the temporary table:: metadata = MetaData() user_tmp = Table( - "user_tmp", metadata, + "user_tmp", + metadata, Column("id", INT, primary_key=True), - Column('name', VARCHAR(50)), - prefixes=['TEMPORARY'] + Column("name", VARCHAR(50)), + prefixes=["TEMPORARY"], ) - e = create_engine("postgresql://scott:tiger@localhost/test", echo='debug') + e = create_engine("postgresql://scott:tiger@localhost/test", echo="debug") with e.begin() as conn: user_tmp.create(conn, checkfirst=True) m2 = MetaData() user = Table( - "user_tmp", m2, + "user_tmp", + m2, Column("id", INT, primary_key=True), - Column('name', VARCHAR(50)), + Column("name", VARCHAR(50)), ) # in 0.9, *will create* the new table, overwriting the old one. @@ -2548,11 +2582,13 @@ Code like the following will now function correctly and return floating points on MySQL:: >>> connection.execute( - ... select([ - ... matchtable.c.title.match('Agile Ruby Programming').label('ruby'), - ... matchtable.c.title.match('Dive Python').label('python'), - ... matchtable.c.title - ... ]).order_by(matchtable.c.id) + ... select( + ... [ + ... 
matchtable.c.title.match("Agile Ruby Programming").label("ruby"), + ... matchtable.c.title.match("Dive Python").label("python"), + ... matchtable.c.title, + ... ] + ... ).order_by(matchtable.c.id) ... ) [ (2.0, 0.0, 'Agile Web Development with Ruby On Rails'), @@ -2614,7 +2650,9 @@ Connecting to SQL Server with PyODBC using a DSN-less connection, e.g. with an explicit hostname, now requires a driver name - SQLAlchemy will no longer attempt to guess a default:: - engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0") + engine = create_engine( + "mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0" + ) SQLAlchemy's previously hardcoded default of "SQL Server" is obsolete on Windows, and SQLAlchemy cannot be tasked with guessing the best driver @@ -2642,13 +2680,16 @@ Improved support for CTEs in Oracle CTE support has been fixed up for Oracle, and there is also a new feature :meth:`_expression.CTE.with_suffixes` that can assist with Oracle's special directives:: - included_parts = select([ - part.c.sub_part, part.c.part, part.c.quantity - ]).where(part.c.part == "p1").\ - cte(name="included_parts", recursive=True).\ - suffix_with( + included_parts = ( + select([part.c.sub_part, part.c.part, part.c.quantity]) + .where(part.c.part == "p1") + .cte(name="included_parts", recursive=True) + .suffix_with( "search depth first by part set ord1", - "cycle part set y_cycle to 1 default 0", dialect='oracle') + "cycle part set y_cycle to 1 default 0", + dialect="oracle", + ) + ) :ticket:`3220` diff --git a/doc/build/changelog/migration_11.rst b/doc/build/changelog/migration_11.rst index 5c1b842b61e..6b25bc41685 100644 --- a/doc/build/changelog/migration_11.rst +++ b/doc/build/changelog/migration_11.rst @@ -207,29 +207,35 @@ expression, and ``func.date()`` applied to a datetime expression; both examples will return duplicate rows due to the joined eager load unless explicit typing is 
applied:: - result = session.query( - func.substr(A.some_thing, 0, 4), A - ).options(joinedload(A.bs)).all() + result = ( + session.query(func.substr(A.some_thing, 0, 4), A).options(joinedload(A.bs)).all() + ) - users = session.query( - func.date( - User.date_created, 'start of month' - ).label('month'), - User, - ).options(joinedload(User.orders)).all() + users = ( + session.query( + func.date(User.date_created, "start of month").label("month"), + User, + ) + .options(joinedload(User.orders)) + .all() + ) The above examples, in order to retain deduping, should be specified as:: - result = session.query( - func.substr(A.some_thing, 0, 4, type_=String), A - ).options(joinedload(A.bs)).all() + result = ( + session.query(func.substr(A.some_thing, 0, 4, type_=String), A) + .options(joinedload(A.bs)) + .all() + ) - users = session.query( - func.date( - User.date_created, 'start of month', type_=DateTime - ).label('month'), - User, - ).options(joinedload(User.orders)).all() + users = ( + session.query( + func.date(User.date_created, "start of month", type_=DateTime).label("month"), + User, + ) + .options(joinedload(User.orders)) + .all() + ) Additionally, the treatment of a so-called "unhashable" type is slightly different than its been in previous releases; internally we are using @@ -259,7 +265,6 @@ string value:: >>> some_user = User() >>> q = s.query(User).filter(User.name == some_user) - ... 
sqlalchemy.exc.ArgumentError: Object <__main__.User object at 0x103167e90> is not legal as a SQL literal value The exception is now immediate when the comparison is made between @@ -292,18 +297,18 @@ refer to specific elements of an "indexable" data type, such as an array or JSON field:: class Person(Base): - __tablename__ = 'person' + __tablename__ = "person" id = Column(Integer, primary_key=True) data = Column(JSON) - name = index_property('data', 'name') + name = index_property("data", "name") Above, the ``name`` attribute will read/write the field ``"name"`` from the JSON column ``data``, after initializing it to an empty dictionary:: - >>> person = Person(name='foobar') + >>> person = Person(name="foobar") >>> person.name foobar @@ -346,17 +351,18 @@ no longer inappropriately add the "single inheritance" criteria when the query is against a subquery expression such as an exists:: class Widget(Base): - __tablename__ = 'widget' + __tablename__ = "widget" id = Column(Integer, primary_key=True) type = Column(String) data = Column(String) - __mapper_args__ = {'polymorphic_on': type} + __mapper_args__ = {"polymorphic_on": type} class FooWidget(Widget): - __mapper_args__ = {'polymorphic_identity': 'foo'} + __mapper_args__ = {"polymorphic_identity": "foo"} - q = session.query(FooWidget).filter(FooWidget.data == 'bar').exists() + + q = session.query(FooWidget).filter(FooWidget.data == "bar").exists() session.query(q).all() @@ -433,10 +439,12 @@ removed would be lost, and the flush would incorrectly raise an error:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + e = create_engine("sqlite://", echo=True) Base.metadata.create_all(e) @@ -522,25 +530,23 @@ the :paramref:`.orm.mapper.passive_deletes` option:: class A(Base): __tablename__ = "a" - id = Column('id', Integer, primary_key=True) + id = Column("id", Integer, primary_key=True) type = Column(String) __mapper_args__ = { - 
'polymorphic_on': type, - 'polymorphic_identity': 'a', - 'passive_deletes': True + "polymorphic_on": type, + "polymorphic_identity": "a", + "passive_deletes": True, } class B(A): - __tablename__ = 'b' - b_table_id = Column('b_table_id', Integer, primary_key=True) - bid = Column('bid', Integer, ForeignKey('a.id', ondelete="CASCADE")) - data = Column('data', String) + __tablename__ = "b" + b_table_id = Column("b_table_id", Integer, primary_key=True) + bid = Column("bid", Integer, ForeignKey("a.id", ondelete="CASCADE")) + data = Column("data", String) - __mapper_args__ = { - 'polymorphic_identity': 'b' - } + __mapper_args__ = {"polymorphic_identity": "b"} With the above mapping, the :paramref:`.orm.mapper.passive_deletes` option is configured on the base mapper; it takes effect for all non-base mappers @@ -571,22 +577,24 @@ Same-named backrefs will not raise an error when applied to concrete inheritance The following mapping has always been possible without issue:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) b = relationship("B", foreign_keys="B.a_id", backref="a") + class A1(A): - __tablename__ = 'a1' + __tablename__ = "a1" id = Column(Integer, primary_key=True) b = relationship("B", foreign_keys="B.a1_id", backref="a1") - __mapper_args__ = {'concrete': True} + __mapper_args__ = {"concrete": True} + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) - a1_id = Column(ForeignKey('a1.id')) + a_id = Column(ForeignKey("a.id")) + a1_id = Column(ForeignKey("a1.id")) Above, even though class ``A`` and class ``A1`` have a relationship named ``b``, no conflict warning or error occurs because class ``A1`` is @@ -596,22 +604,22 @@ However, if the relationships were configured the other way, an error would occur:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) class A1(A): - __tablename__ = 'a1' 
+ __tablename__ = "a1" id = Column(Integer, primary_key=True) - __mapper_args__ = {'concrete': True} + __mapper_args__ = {"concrete": True} class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) - a1_id = Column(ForeignKey('a1.id')) + a_id = Column(ForeignKey("a.id")) + a1_id = Column(ForeignKey("a1.id")) a = relationship("A", backref="b") a1 = relationship("A1", backref="b") @@ -634,22 +642,21 @@ on inherited mapper ''; this can cause dependency issues during flush". An example is as follows:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B") class ASub(A): - __tablename__ = 'a_sub' - id = Column(Integer, ForeignKey('a.id'), primary_key=True) + __tablename__ = "a_sub" + id = Column(Integer, ForeignKey("a.id"), primary_key=True) bs = relationship("B") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) - + a_id = Column(ForeignKey("a.id")) This warning dates back to the 0.4 series in 2007 and is based on a version of the unit of work code that has since been entirely rewritten. Currently, there @@ -672,7 +679,7 @@ A hybrid method or property will now reflect the ``__doc__`` value present in the original docstring:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) name = Column(String) @@ -710,9 +717,9 @@ also propagated from the hybrid descriptor itself, rather than from the underlyi expression. 
That is, accessing ``A.some_name.info`` now returns the same dictionary that you'd get from ``inspect(A).all_orm_descriptors['some_name'].info``:: - >>> A.some_name.info['foo'] = 'bar' + >>> A.some_name.info["foo"] = "bar" >>> from sqlalchemy import inspect - >>> inspect(A).all_orm_descriptors['some_name'].info + >>> inspect(A).all_orm_descriptors["some_name"].info {'foo': 'bar'} Note that this ``.info`` dictionary is **separate** from that of a mapped attribute @@ -739,11 +746,11 @@ consistent. Given:: - u1 = User(id=7, name='x') + u1 = User(id=7, name="x") u1.orders = [ - Order(description='o1', address=Address(id=1, email_address='a')), - Order(description='o2', address=Address(id=1, email_address='b')), - Order(description='o3', address=Address(id=1, email_address='c')) + Order(description="o1", address=Address(id=1, email_address="a")), + Order(description="o2", address=Address(id=1, email_address="b")), + Order(description="o3", address=Address(id=1, email_address="c")), ] sess = Session() @@ -925,32 +932,32 @@ row on a different "path" that doesn't include the attribute. 
This is a deep use case that's hard to reproduce, but the general idea is as follows:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) - b_id = Column(ForeignKey('b.id')) - c_id = Column(ForeignKey('c.id')) + b_id = Column(ForeignKey("b.id")) + c_id = Column(ForeignKey("c.id")) b = relationship("B") c = relationship("C") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - c_id = Column(ForeignKey('c.id')) + c_id = Column(ForeignKey("c.id")) c = relationship("C") class C(Base): - __tablename__ = 'c' + __tablename__ = "c" id = Column(Integer, primary_key=True) - d_id = Column(ForeignKey('d.id')) + d_id = Column(ForeignKey("d.id")) d = relationship("D") class D(Base): - __tablename__ = 'd' + __tablename__ = "d" id = Column(Integer, primary_key=True) @@ -959,7 +966,9 @@ deep use case that's hard to reproduce, but the general idea is as follows:: q = s.query(A) q = q.join(A.b).join(c_alias_1, B.c).join(c_alias_1.d) - q = q.options(contains_eager(A.b).contains_eager(B.c, alias=c_alias_1).contains_eager(C.d)) + q = q.options( + contains_eager(A.b).contains_eager(B.c, alias=c_alias_1).contains_eager(C.d) + ) q = q.join(c_alias_2, A.c) q = q.options(contains_eager(A.c, alias=c_alias_2)) @@ -1121,6 +1130,7 @@ for specific exceptions:: engine = create_engine("postgresql+psycopg2://") + @event.listens_for(engine, "handle_error") def cancel_disconnect(ctx): if isinstance(ctx.original_exception, KeyboardInterrupt): @@ -1149,25 +1159,22 @@ statement:: >>> from sqlalchemy import table, column, select, literal, exists >>> orders = table( - ... 'orders', - ... column('region'), - ... column('amount'), - ... column('product'), - ... column('quantity') + ... "orders", column("region"), column("amount"), column("product"), column("quantity") ... ) >>> >>> upsert = ( ... orders.update() - ... .where(orders.c.region == 'Region1') - ... 
.values(amount=1.0, product='Product1', quantity=1) - ... .returning(*(orders.c._all_columns)).cte('upsert')) + ... .where(orders.c.region == "Region1") + ... .values(amount=1.0, product="Product1", quantity=1) + ... .returning(*(orders.c._all_columns)) + ... .cte("upsert") + ... ) >>> >>> insert = orders.insert().from_select( ... orders.c.keys(), - ... select([ - ... literal('Region1'), literal(1.0), - ... literal('Product1'), literal(1) - ... ]).where(~exists(upsert.select())) + ... select([literal("Region1"), literal(1.0), literal("Product1"), literal(1)]).where( + ... ~exists(upsert.select()) + ... ), ... ) >>> >>> print(insert) # note formatting added for clarity @@ -1198,13 +1205,13 @@ RANGE and ROWS expressions for window functions:: >>> from sqlalchemy import func - >>> print(func.row_number().over(order_by='x', range_=(-5, 10))) + >>> print(func.row_number().over(order_by="x", range_=(-5, 10))) row_number() OVER (ORDER BY x RANGE BETWEEN :param_1 PRECEDING AND :param_2 FOLLOWING) - >>> print(func.row_number().over(order_by='x', rows=(None, 0))) + >>> print(func.row_number().over(order_by="x", rows=(None, 0))) row_number() OVER (ORDER BY x ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) - >>> print(func.row_number().over(order_by='x', range_=(-2, None))) + >>> print(func.row_number().over(order_by="x", range_=(-2, None))) row_number() OVER (ORDER BY x RANGE BETWEEN :param_1 PRECEDING AND UNBOUNDED FOLLOWING) :paramref:`.expression.over.range_` and :paramref:`.expression.over.rows` are specified as @@ -1230,10 +1237,13 @@ correlation of tables that are derived from the same FROM clause as the selectable, e.g. lateral correlation:: >>> from sqlalchemy import table, column, select, true - >>> people = table('people', column('people_id'), column('age'), column('name')) - >>> books = table('books', column('book_id'), column('owner_id')) - >>> subq = select([books.c.book_id]).\ - ... 
where(books.c.owner_id == people.c.people_id).lateral("book_subq") + >>> people = table("people", column("people_id"), column("age"), column("name")) + >>> books = table("books", column("book_id"), column("owner_id")) + >>> subq = ( + ... select([books.c.book_id]) + ... .where(books.c.owner_id == people.c.people_id) + ... .lateral("book_subq") + ... ) >>> print(select([people]).select_from(people.join(subq, true()))) SELECT people.people_id, people.age, people.name FROM people JOIN LATERAL (SELECT books.book_id AS book_id @@ -1262,10 +1272,7 @@ construct similar to an alias:: from sqlalchemy import func - selectable = people.tablesample( - func.bernoulli(1), - name='alias', - seed=func.random()) + selectable = people.tablesample(func.bernoulli(1), name="alias", seed=func.random()) stmt = select([selectable.c.people_id]) Assuming ``people`` with a column ``people_id``, the above @@ -1295,9 +1302,10 @@ What's changed is that this feature no longer turns on automatically for a *composite* primary key; previously, a table definition such as:: Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True) + "some_table", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True), ) Would have "autoincrement" semantics applied to the ``'x'`` column, only @@ -1306,9 +1314,10 @@ disable this, one would have to turn off ``autoincrement`` on all columns:: # old way Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True, autoincrement=False), - Column('y', Integer, primary_key=True, autoincrement=False) + "some_table", + metadata, + Column("x", Integer, primary_key=True, autoincrement=False), + Column("y", Integer, primary_key=True, autoincrement=False), ) With the new behavior, the composite primary key will not have autoincrement @@ -1316,9 +1325,10 @@ semantics unless a column is marked explicitly with ``autoincrement=True``:: # column 'y' will be 
SERIAL/AUTO_INCREMENT/ auto-generating Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True, autoincrement=True) + "some_table", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True, autoincrement=True), ) In order to anticipate some potential backwards-incompatible scenarios, @@ -1327,9 +1337,10 @@ for missing primary key values on composite primary key columns that don't have autoincrement set up; given a table such as:: Table( - 'b', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True) + "b", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True), ) An INSERT emitted with no values for this table will produce this warning:: @@ -1349,9 +1360,10 @@ default or something less common such as a trigger, the presence of a value generator can be indicated using :class:`.FetchedValue`:: Table( - 'b', metadata, - Column('x', Integer, primary_key=True, server_default=FetchedValue()), - Column('y', Integer, primary_key=True, server_default=FetchedValue()) + "b", + metadata, + Column("x", Integer, primary_key=True, server_default=FetchedValue()), + Column("y", Integer, primary_key=True, server_default=FetchedValue()), ) For the very unlikely case where a composite primary key is actually intended @@ -1359,9 +1371,10 @@ to store NULL in one or more of its columns (only supported on SQLite and MySQL) specify the column with ``nullable=True``:: Table( - 'b', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True, nullable=True) + "b", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True, nullable=True), ) In a related change, the ``autoincrement`` flag may be set to True @@ -1384,19 +1397,19 @@ New operators :meth:`.ColumnOperators.is_distinct_from` and :meth:`.ColumnOperators.isnot_distinct_from` allow the IS DISTINCT 
FROM and IS NOT DISTINCT FROM sql operation:: - >>> print(column('x').is_distinct_from(None)) + >>> print(column("x").is_distinct_from(None)) x IS DISTINCT FROM NULL Handling is provided for NULL, True and False:: - >>> print(column('x').isnot_distinct_from(False)) + >>> print(column("x").isnot_distinct_from(False)) x IS NOT DISTINCT FROM false For SQLite, which doesn't have this operator, "IS" / "IS NOT" is rendered, which on SQLite works for NULL unlike other backends:: >>> from sqlalchemy.dialects import sqlite - >>> print(column('x').is_distinct_from(None).compile(dialect=sqlite.dialect())) + >>> print(column("x").is_distinct_from(None).compile(dialect=sqlite.dialect())) x IS NOT NULL .. _change_1957: @@ -1445,19 +1458,15 @@ and the column arguments passed to :meth:`_expression.TextClause.columns`:: from sqlalchemy import text - stmt = text("SELECT users.id, addresses.id, users.id, " - "users.name, addresses.email_address AS email " - "FROM users JOIN addresses ON users.id=addresses.user_id " - "WHERE users.id = 1").columns( - User.id, - Address.id, - Address.user_id, - User.name, - Address.email_address - ) - - query = session.query(User).from_statement(stmt).\ - options(contains_eager(User.addresses)) + + stmt = text( + "SELECT users.id, addresses.id, users.id, " + "users.name, addresses.email_address AS email " + "FROM users JOIN addresses ON users.id=addresses.user_id " + "WHERE users.id = 1" + ).columns(User.id, Address.id, Address.user_id, User.name, Address.email_address) + + query = session.query(User).from_statement(stmt).options(contains_eager(User.addresses)) result = query.all() Above, the textual SQL contains the column "id" three times, which would @@ -1489,7 +1498,7 @@ Another aspect of this change is that the rules for matching columns have also b to rely upon "positional" matching more fully for compiled SQL constructs as well. 
Given a statement like the following:: - ua = users.alias('ua') + ua = users.alias("ua") stmt = select([users.c.user_id, ua.c.user_id]) The above statement will compile to:: @@ -1512,7 +1521,7 @@ fetch columns:: ua_id = row[ua.c.user_id] # this still raises, however - user_id = row['user_id'] + user_id = row["user_id"] Much less likely to get an "ambiguous column" error message ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -1550,10 +1559,7 @@ string/integer/etc values:: three = 3 - t = Table( - 'data', MetaData(), - Column('value', Enum(MyEnum)) - ) + t = Table("data", MetaData(), Column("value", Enum(MyEnum))) e = create_engine("sqlite://") t.create(e) @@ -1600,8 +1606,9 @@ flag is used (1.1.0b2):: >>> from sqlalchemy import Table, MetaData, Column, Enum, create_engine >>> t = Table( - ... 'data', MetaData(), - ... Column('value', Enum("one", "two", "three", validate_strings=True)) + ... "data", + ... MetaData(), + ... Column("value", Enum("one", "two", "three", validate_strings=True)), ... ) >>> e = create_engine("sqlite://") >>> t.create(e) @@ -1674,8 +1681,8 @@ within logging, exception reporting, as well as ``repr()`` of the row itself:: >>> from sqlalchemy import create_engine >>> import random - >>> e = create_engine("sqlite://", echo='debug') - >>> some_value = ''.join(chr(random.randint(52, 85)) for i in range(5000)) + >>> e = create_engine("sqlite://", echo="debug") + >>> some_value = "".join(chr(random.randint(52, 85)) for i in range(5000)) >>> row = e.execute("select ?", [some_value]).first() ... (lines are wrapped for clarity) ... 2016-02-17 13:23:03,027 INFO sqlalchemy.engine.base.Engine select ? @@ -1752,6 +1759,7 @@ replacing the ``None`` value:: json_value = Column(JSON(none_as_null=False), default="some default") + # would insert "some default" instead of "'null'", # now will insert "'null'" obj = MyObject(json_value=None) @@ -1769,6 +1777,7 @@ inconsistently vs. 
all other datatypes:: some_other_value = Column(String(50)) json_value = Column(JSON(none_as_null=False)) + # would result in NULL for some_other_value, # but json "'null'" for json_value. Now results in NULL for both # (the json_value is omitted from the INSERT) @@ -1786,9 +1795,7 @@ would be ignored in all cases:: # would insert SQL NULL and/or trigger defaults, # now inserts "'null'" - session.bulk_insert_mappings( - MyObject, - [{"json_value": None}]) + session.bulk_insert_mappings(MyObject, [{"json_value": None}]) The :class:`_types.JSON` type now implements the :attr:`.TypeEngine.should_evaluate_none` flag, @@ -1847,9 +1854,7 @@ is now in Core. The :class:`_types.ARRAY` type still **only works on PostgreSQL**, however it can be used directly, supporting special array use cases such as indexed access, as well as support for the ANY and ALL:: - mytable = Table("mytable", metadata, - Column("data", ARRAY(Integer, dimensions=2)) - ) + mytable = Table("mytable", metadata, Column("data", ARRAY(Integer, dimensions=2))) expr = mytable.c.data[5][6] @@ -1884,7 +1889,6 @@ such as:: subq = select([mytable.c.value]) select([mytable]).where(12 > any_(subq)) - :ticket:`3516` .. 
_change_3132: @@ -1897,12 +1901,14 @@ function for the ``array_agg()`` SQL function that returns an array, which is now available using :class:`_functions.array_agg`:: from sqlalchemy import func + stmt = select([func.array_agg(table.c.value)]) A PostgreSQL element for an aggregate ORDER BY is also added via :class:`_postgresql.aggregate_order_by`:: from sqlalchemy.dialects.postgresql import aggregate_order_by + expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc())) stmt = select([expr]) @@ -1914,8 +1920,8 @@ The PG dialect itself also provides an :func:`_postgresql.array_agg` wrapper to ensure the :class:`_postgresql.ARRAY` type:: from sqlalchemy.dialects.postgresql import array_agg - stmt = select([array_agg(table.c.value).contains('foo')]) + stmt = select([array_agg(table.c.value).contains("foo")]) Additionally, functions like ``percentile_cont()``, ``percentile_disc()``, ``rank()``, ``dense_rank()`` and others that require an ordering via @@ -1923,12 +1929,13 @@ Additionally, functions like ``percentile_cont()``, ``percentile_disc()``, :meth:`.FunctionElement.within_group` modifier:: from sqlalchemy import func - stmt = select([ - department.c.id, - func.percentile_cont(0.5).within_group( - department.c.salary.desc() - ) - ]) + + stmt = select( + [ + department.c.id, + func.percentile_cont(0.5).within_group(department.c.salary.desc()), + ] + ) The above statement would produce SQL similar to:: @@ -1956,7 +1963,7 @@ an :class:`_postgresql.ENUM` had to look like this:: # old way class MyEnum(TypeDecorator, SchemaType): - impl = postgresql.ENUM('one', 'two', 'three', name='myenum') + impl = postgresql.ENUM("one", "two", "three", name="myenum") def _set_table(self, table): self.impl._set_table(table) @@ -1966,8 +1973,7 @@ can be done like any other type:: # new way class MyEnum(TypeDecorator): - impl = postgresql.ENUM('one', 'two', 'three', name='myenum') - + impl = postgresql.ENUM("one", "two", "three", name="myenum") :ticket:`2919` @@ -1987,17 
+1993,18 @@ translation works for DDL and SQL generation, as well as with the ORM. For example, if the ``User`` class were assigned the schema "per_user":: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) - __table_args__ = {'schema': 'per_user'} + __table_args__ = {"schema": "per_user"} On each request, the :class:`.Session` can be set up to refer to a different schema each time:: session = Session() - session.connection(execution_options={ - "schema_translate_map": {"per_user": "account_one"}}) + session.connection( + execution_options={"schema_translate_map": {"per_user": "account_one"}} + ) # will query from the ``account_one.user`` table session.query(User).get(5) @@ -2072,21 +2079,21 @@ Then, a mapping where we are equating a string "id" column on one table to an integer "id" column on the other:: class Person(Base): - __tablename__ = 'person' + __tablename__ = "person" id = Column(StringAsInt, primary_key=True) pets = relationship( - 'Pets', + "Pets", primaryjoin=( - 'foreign(Pets.person_id)' - '==cast(type_coerce(Person.id, Integer), Integer)' - ) + "foreign(Pets.person_id)" "==cast(type_coerce(Person.id, Integer), Integer)" + ), ) + class Pets(Base): - __tablename__ = 'pets' - id = Column('id', Integer, primary_key=True) - person_id = Column('person_id', Integer) + __tablename__ = "pets" + id = Column("id", Integer, primary_key=True) + person_id = Column("person_id", Integer) Above, in the :paramref:`_orm.relationship.primaryjoin` expression, we are using :func:`.type_coerce` to handle bound parameters passed via @@ -2166,8 +2173,7 @@ Column:: class MyObject(Base): # ... 
- json_value = Column( - JSON(none_as_null=False), nullable=False, default=JSON.NULL) + json_value = Column(JSON(none_as_null=False), nullable=False, default=JSON.NULL) Or, ensure the value is present on the object:: @@ -2182,7 +2188,6 @@ passed to :paramref:`_schema.Column.default` or :paramref:`_schema.Column.server # default=None is the same as omitting it entirely, does not apply JSON NULL json_value = Column(JSON(none_as_null=False), nullable=False, default=None) - .. seealso:: :ref:`change_3514` @@ -2195,9 +2200,11 @@ Columns no longer added redundantly with DISTINCT + ORDER BY A query such as the following will now augment only those columns that are missing from the SELECT list, without duplicates:: - q = session.query(User.id, User.name.label('name')).\ - distinct().\ - order_by(User.id, User.name, User.fullname) + q = ( + session.query(User.id, User.name.label("name")) + .distinct() + .order_by(User.id, User.name, User.fullname) + ) Produces:: @@ -2237,7 +2244,7 @@ now raises an error, whereas previously it would silently pick only the last defined validator:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) data = Column(String) @@ -2250,6 +2257,7 @@ last defined validator:: def _validate_data_two(self): assert "y" in data + configure_mappers() Will raise:: @@ -2321,7 +2329,7 @@ passed through the literal quoting system:: >>> from sqlalchemy.schema import MetaData, Table, Column, CreateTable >>> from sqlalchemy.types import String - >>> t = Table('t', MetaData(), Column('x', String(), server_default="hi ' there")) + >>> t = Table("t", MetaData(), Column("x", String(), server_default="hi ' there")) >>> print(CreateTable(t)) CREATE TABLE t ( @@ -2473,7 +2481,7 @@ This includes: one less dimension. 
Given a column with type ``ARRAY(Integer, dimensions=3)``, we can now perform this expression:: - int_expr = col[5][6][7] # returns an Integer expression object + int_expr = col[5][6][7] # returns an Integer expression object Previously, the indexed access to ``col[5]`` would return an expression of type :class:`.Integer` where we could no longer perform indexed access @@ -2490,7 +2498,7 @@ This includes: the :class:`_postgresql.ARRAY` type, this means that it is now straightforward to produce JSON expressions with multiple levels of indexed access:: - json_expr = json_col['key1']['attr1'][5] + json_expr = json_col["key1"]["attr1"][5] * The "textual" type that is returned by indexed access of :class:`.HSTORE` as well as the "textual" type that is returned by indexed access of @@ -2520,12 +2528,11 @@ support CAST operations to each other without the "astext" aspect. This means that in most cases, an application that was doing this:: - expr = json_col['somekey'].cast(Integer) + expr = json_col["somekey"].cast(Integer) Will now need to change to this:: - expr = json_col['somekey'].astext.cast(Integer) - + expr = json_col["somekey"].astext.cast(Integer) .. 
_change_2729: @@ -2536,12 +2543,21 @@ A table definition like the following will now emit CREATE TYPE as expected:: enum = Enum( - 'manager', 'place_admin', 'carwash_admin', - 'parking_admin', 'service_admin', 'tire_admin', - 'mechanic', 'carwasher', 'tire_mechanic', name="work_place_roles") + "manager", + "place_admin", + "carwash_admin", + "parking_admin", + "service_admin", + "tire_admin", + "mechanic", + "carwasher", + "tire_mechanic", + name="work_place_roles", + ) + class WorkPlacement(Base): - __tablename__ = 'work_placement' + __tablename__ = "work_placement" id = Column(Integer, primary_key=True) roles = Column(ARRAY(enum)) @@ -2580,10 +2596,11 @@ The new argument :paramref:`.PGInspector.get_view_names.include` allows specification of which sub-types of views should be returned:: from sqlalchemy import inspect + insp = inspect(engine) - plain_views = insp.get_view_names(include='plain') - all_views = insp.get_view_names(include=('plain', 'materialized')) + plain_views = insp.get_view_names(include="plain") + all_views = insp.get_view_names(include=("plain", "materialized")) :ticket:`3588` @@ -2668,9 +2685,7 @@ The MySQL dialect now accepts the value "AUTOCOMMIT" for the parameters:: connection = engine.connect() - connection = connection.execution_options( - isolation_level="AUTOCOMMIT" - ) + connection = connection.execution_options(isolation_level="AUTOCOMMIT") The isolation level makes use of the various "autocommit" attributes provided by most MySQL DBAPIs. 
@@ -2687,10 +2702,11 @@ on an InnoDB table featured AUTO_INCREMENT on one of its columns which was not the first column, e.g.:: t = Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True, autoincrement=False), - Column('y', Integer, primary_key=True, autoincrement=True), - mysql_engine='InnoDB' + "some_table", + metadata, + Column("x", Integer, primary_key=True, autoincrement=False), + Column("y", Integer, primary_key=True, autoincrement=True), + mysql_engine="InnoDB", ) DDL such as the following would be generated:: @@ -2720,12 +2736,13 @@ use the :class:`.PrimaryKeyConstraint` construct explicitly (1.1.0b2) (along with a KEY for the autoincrement column as required by MySQL), e.g.:: t = Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True, autoincrement=True), - PrimaryKeyConstraint('x', 'y'), - UniqueConstraint('y'), - mysql_engine='InnoDB' + "some_table", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True, autoincrement=True), + PrimaryKeyConstraint("x", "y"), + UniqueConstraint("y"), + mysql_engine="InnoDB", ) Along with the change :ref:`change_3216`, composite primary keys with @@ -2735,14 +2752,13 @@ now defaults to the value ``"auto"`` and the ``autoincrement=False`` directives are no longer needed:: t = Table( - 'some_table', metadata, - Column('x', Integer, primary_key=True), - Column('y', Integer, primary_key=True, autoincrement=True), - mysql_engine='InnoDB' + "some_table", + metadata, + Column("x", Integer, primary_key=True), + Column("y", Integer, primary_key=True, autoincrement=True), + mysql_engine="InnoDB", ) - - Dialect Improvements and Changes - SQLite ========================================= @@ -2849,8 +2865,7 @@ parameters. 
The four standard levels are supported as well as ``SNAPSHOT``:: engine = create_engine( - "mssql+pyodbc://scott:tiger@ms_2008", - isolation_level="REPEATABLE READ" + "mssql+pyodbc://scott:tiger@ms_2008", isolation_level="REPEATABLE READ" ) .. seealso:: @@ -2869,12 +2884,11 @@ which includes a length, an "un-lengthed" type under SQL Server would copy the "length" parameter as the value ``"max"``:: >>> from sqlalchemy import create_engine, inspect - >>> engine = create_engine('mssql+pyodbc://scott:tiger@ms_2008', echo=True) + >>> engine = create_engine("mssql+pyodbc://scott:tiger@ms_2008", echo=True) >>> engine.execute("create table s (x varchar(max), y varbinary(max))") >>> insp = inspect(engine) >>> for col in insp.get_columns("s"): - ... print(col['type'].__class__, col['type'].length) - ... + ... print(col["type"].__class__, col["type"].length) max max @@ -2884,8 +2898,7 @@ interprets as "max". The fix then is so that these lengths come out as None, so that the type objects work in non-SQL Server contexts:: >>> for col in insp.get_columns("s"): - ... print(col['type'].__class__, col['type'].length) - ... + ... print(col["type"].__class__, col["type"].length) None None @@ -2918,10 +2931,11 @@ This aliasing attempts to turn schema-qualified tables into aliases; given a table such as:: account_table = Table( - 'account', metadata, - Column('id', Integer, primary_key=True), - Column('info', String(100)), - schema="customer_schema" + "account", + metadata, + Column("id", Integer, primary_key=True), + Column("info", String(100)), + schema="customer_schema", ) The legacy mode of behavior will attempt to turn a schema-qualified table diff --git a/doc/build/changelog/migration_12.rst b/doc/build/changelog/migration_12.rst index 7073660f788..d5676e2854d 100644 --- a/doc/build/changelog/migration_12.rst +++ b/doc/build/changelog/migration_12.rst @@ -80,9 +80,11 @@ that is cacheable as well as more efficient. 
Given a query as below:: - q = session.query(User).\ - filter(User.name.like('%ed%')).\ - options(subqueryload(User.addresses)) + q = ( + session.query(User) + .filter(User.name.like("%ed%")) + .options(subqueryload(User.addresses)) + ) The SQL produced would be the query against ``User`` followed by the subqueryload for ``User.addresses`` (note the parameters are also listed):: @@ -106,9 +108,11 @@ subqueryload for ``User.addresses`` (note the parameters are also listed):: With "selectin" loading, we instead get a SELECT that refers to the actual primary key values loaded in the parent query:: - q = session.query(User).\ - filter(User.name.like('%ed%')).\ - options(selectinload(User.addresses)) + q = ( + session.query(User) + .filter(User.name.like("%ed%")) + .options(selectinload(User.addresses)) + ) Produces:: @@ -225,8 +229,9 @@ if not specified, the attribute defaults to ``None``:: from sqlalchemy.orm import query_expression from sqlalchemy.orm import with_expression + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) x = Column(Integer) y = Column(Integer) @@ -234,9 +239,9 @@ if not specified, the attribute defaults to ``None``:: # will be None normally... expr = query_expression() + # but let's give it x + y - a1 = session.query(A).options( - with_expression(A.expr, A.x + A.y)).first() + a1 = session.query(A).options(with_expression(A.expr, A.x + A.y)).first() print(a1.expr) .. seealso:: @@ -259,10 +264,9 @@ Below, we emit a DELETE against ``SomeEntity``, adding a FROM clause (or equivalent, depending on backend) against ``SomeOtherEntity``:: - query(SomeEntity).\ - filter(SomeEntity.id==SomeOtherEntity.id).\ - filter(SomeOtherEntity.foo=='bar').\ - delete() + query(SomeEntity).filter(SomeEntity.id == SomeOtherEntity.id).filter( + SomeOtherEntity.foo == "bar" + ).delete() .. 
seealso:: @@ -291,28 +295,26 @@ into multiple columns/expressions:: @hybrid.hybrid_property def name(self): - return self.first_name + ' ' + self.last_name + return self.first_name + " " + self.last_name @name.expression def name(cls): - return func.concat(cls.first_name, ' ', cls.last_name) + return func.concat(cls.first_name, " ", cls.last_name) @name.update_expression def name(cls, value): - f, l = value.split(' ', 1) + f, l = value.split(" ", 1) return [(cls.first_name, f), (cls.last_name, l)] Above, an UPDATE can be rendered using:: - session.query(Person).filter(Person.id == 5).update( - {Person.name: "Dr. No"}) + session.query(Person).filter(Person.id == 5).update({Person.name: "Dr. No"}) Similar functionality is available for composites, where composite values will be broken out into their individual columns for bulk UPDATE:: session.query(Vertex).update({Edge.start: Point(3, 4)}) - .. seealso:: :ref:`hybrid_bulk_update` @@ -342,6 +344,7 @@ Python:: def name(self, value): self.first_name = value + class FirstNameLastName(FirstNameOnly): # ... @@ -349,15 +352,15 @@ Python:: @FirstNameOnly.name.getter def name(self): - return self.first_name + ' ' + self.last_name + return self.first_name + " " + self.last_name @name.setter def name(self, value): - self.first_name, self.last_name = value.split(' ', maxsplit=1) + self.first_name, self.last_name = value.split(" ", maxsplit=1) @name.expression def name(cls): - return func.concat(cls.first_name, ' ', cls.last_name) + return func.concat(cls.first_name, " ", cls.last_name) Above, the ``FirstNameOnly.name`` hybrid is referenced by the ``FirstNameLastName`` subclass in order to repurpose it specifically to the @@ -391,6 +394,7 @@ hybrid in-place, interfering with the definition on the superclass. 
def _set_name(self, value): self.first_name = value + class FirstNameOnly(Base): @hybrid_property def name(self): @@ -426,10 +430,12 @@ if this "append" event is the second part of a bulk replace:: from sqlalchemy.orm.attributes import OP_BULK_REPLACE + @event.listens_for(SomeObject.collection, "bulk_replace") def process_collection(target, values, initiator): values[:] = [_make_value(value) for value in values] + @event.listens_for(SomeObject.collection, "append", retval=True) def process_collection(target, value, initiator): # make sure bulk_replace didn't already do it @@ -438,7 +444,6 @@ if this "append" event is the second part of a bulk replace:: else: return value - :ticket:`3896` .. _change_3303: @@ -457,11 +462,13 @@ extension:: Base = declarative_base() + class MyDataClass(Base): - __tablename__ = 'my_data' + __tablename__ = "my_data" id = Column(Integer, primary_key=True) data = Column(MutableDict.as_mutable(JSONEncodedDict)) + @event.listens_for(MyDataClass.data, "modified") def modified_json(instance): print("json value modified:", instance.data) @@ -511,7 +518,6 @@ becomes part of the next flush process:: model = session.query(MyModel).first() model.json_set &= {1, 3} - :ticket:`3853` .. 
_change_3769: @@ -527,7 +533,7 @@ is an association proxy that links to ``AtoB.bvalue``, which is itself an association proxy onto ``B``:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) b_values = association_proxy("atob", "b_value") @@ -535,26 +541,26 @@ itself an association proxy onto ``B``:: class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) value = Column(String) c = relationship("C") class C(Base): - __tablename__ = 'c' + __tablename__ = "c" id = Column(Integer, primary_key=True) - b_id = Column(ForeignKey('b.id')) + b_id = Column(ForeignKey("b.id")) value = Column(String) class AtoB(Base): - __tablename__ = 'atob' + __tablename__ = "atob" - a_id = Column(ForeignKey('a.id'), primary_key=True) - b_id = Column(ForeignKey('b.id'), primary_key=True) + a_id = Column(ForeignKey("a.id"), primary_key=True) + b_id = Column(ForeignKey("b.id"), primary_key=True) a = relationship("A", backref="atob") b = relationship("B", backref="atob") @@ -567,7 +573,7 @@ query across the two proxies ``A.b_values``, ``AtoB.b_value``: .. sourcecode:: pycon+sql - >>> s.query(A).filter(A.b_values.contains('hi')).all() + >>> s.query(A).filter(A.b_values.contains("hi")).all() {opensql}SELECT a.id AS a_id FROM a WHERE EXISTS (SELECT 1 @@ -581,7 +587,7 @@ to query across the two proxies ``A.c_values``, ``AtoB.c_value``: .. sourcecode:: pycon+sql - >>> s.query(A).filter(A.c_values.any(value='x')).all() + >>> s.query(A).filter(A.c_values.any(value="x")).all() {opensql}SELECT a.id AS a_id FROM a WHERE EXISTS (SELECT 1 @@ -612,8 +618,8 @@ primary key value. 
The example now illustrates that a new ``identity_token`` field tracks this difference so that the two objects can co-exist in the same identity map:: - tokyo = WeatherLocation('Asia', 'Tokyo') - newyork = WeatherLocation('North America', 'New York') + tokyo = WeatherLocation("Asia", "Tokyo") + newyork = WeatherLocation("North America", "New York") tokyo.reports.append(Report(80.0)) newyork.reports.append(Report(75)) @@ -632,15 +638,14 @@ same identity map:: newyork_report = newyork.reports[0] tokyo_report = tokyo.reports[0] - assert inspect(newyork_report).identity_key == (Report, (1, ), "north_america") - assert inspect(tokyo_report).identity_key == (Report, (1, ), "asia") + assert inspect(newyork_report).identity_key == (Report, (1,), "north_america") + assert inspect(tokyo_report).identity_key == (Report, (1,), "asia") # the token representing the originating shard is also available directly assert inspect(newyork_report).identity_token == "north_america" assert inspect(tokyo_report).identity_token == "asia" - :ticket:`4137` New Features and Improvements - Core @@ -673,6 +678,7 @@ illustrates a recipe that will allow for the "liberal" behavior of the pre-1.1 from sqlalchemy import Boolean from sqlalchemy import TypeDecorator + class LiberalBoolean(TypeDecorator): impl = Boolean @@ -681,7 +687,6 @@ illustrates a recipe that will allow for the "liberal" behavior of the pre-1.1 value = bool(int(value)) return value - :ticket:`4102` .. _change_3919: @@ -844,7 +849,7 @@ other comparison operators has been flattened into one level. This will have the effect of more parenthesization being generated when comparison operators are combined together, such as:: - (column('q') == null()) != (column('y') == null()) + (column("q") == null()) != (column("y") == null()) Will now generate ``(q IS NULL) != (y IS NULL)`` rather than ``q IS NULL != y IS NULL``. @@ -862,9 +867,10 @@ and columns. 
These are specified via the :paramref:`_schema.Table.comment` and :paramref:`_schema.Column.comment` arguments:: Table( - 'my_table', metadata, - Column('q', Integer, comment="the Q value"), - comment="my Q table" + "my_table", + metadata, + Column("q", Integer, comment="the Q value"), + comment="my Q table", ) Above, DDL will be rendered appropriately upon table create to associate @@ -891,9 +897,11 @@ the 0.7 and 0.8 series. Given a statement as:: - stmt = users.delete().\ - where(users.c.id == addresses.c.id).\ - where(addresses.c.email_address.startswith('ed%')) + stmt = ( + users.delete() + .where(users.c.id == addresses.c.id) + .where(addresses.c.email_address.startswith("ed%")) + ) conn.execute(stmt) The resulting SQL from the above statement on a PostgreSQL backend @@ -930,7 +938,7 @@ can now be used to change the autoescape character, if desired. An expression such as:: - >>> column('x').startswith('total%score', autoescape=True) + >>> column("x").startswith("total%score", autoescape=True) Renders as:: @@ -940,7 +948,7 @@ Where the value of the parameter "x_1" is ``'total/%score'``. Similarly, an expression that has backslashes:: - >>> column('x').startswith('total/score', autoescape=True) + >>> column("x").startswith("total/score", autoescape=True) Will render the same way, with the value of the parameter "x_1" as ``'total//score'``. @@ -968,8 +976,8 @@ if the application is working with plain floats. float_value = connection.scalar( - select([literal(4.56)]) # the "BindParameter" will now be - # Float, not Numeric(asdecimal=True) + select([literal(4.56)]) # the "BindParameter" will now be + # Float, not Numeric(asdecimal=True) ) * Math operations between :class:`.Numeric`, :class:`.Float`, and @@ -978,11 +986,11 @@ if the application is working with plain floats. 
as well as if the type should be :class:`.Float`:: # asdecimal flag is maintained - expr = column('a', Integer) * column('b', Numeric(asdecimal=False)) + expr = column("a", Integer) * column("b", Numeric(asdecimal=False)) assert expr.type.asdecimal == False # Float subclass of Numeric is maintained - expr = column('a', Integer) * column('b', Float()) + expr = column("a", Integer) * column("b", Float()) assert isinstance(expr.type, Float) * The :class:`.Float` datatype will apply the ``float()`` processor to @@ -1009,9 +1017,7 @@ is added to the compiler to allow for the space. All three functions are named in the documentation now:: >>> from sqlalchemy import select, table, column, func, tuple_ - >>> t = table('t', - ... column('value'), column('x'), - ... column('y'), column('z'), column('q')) + >>> t = table("t", column("value"), column("x"), column("y"), column("z"), column("q")) >>> stmt = select([func.sum(t.c.value)]).group_by( ... func.grouping_sets( ... tuple_(t.c.x, t.c.y), @@ -1046,16 +1052,17 @@ localized to the current VALUES clause being processed:: def mydefault(context): - return context.get_current_parameters()['counter'] + 12 + return context.get_current_parameters()["counter"] + 12 - mytable = Table('mytable', metadata_obj, - Column('counter', Integer), - Column('counter_plus_twelve', - Integer, default=mydefault, onupdate=mydefault) + + mytable = Table( + "mytable", + metadata_obj, + Column("counter", Integer), + Column("counter_plus_twelve", Integer, default=mydefault, onupdate=mydefault), ) - stmt = mytable.insert().values( - [{"counter": 5}, {"counter": 18}, {"counter": 20}]) + stmt = mytable.insert().values([{"counter": 5}, {"counter": 18}, {"counter": 20}]) conn.execute(stmt) @@ -1077,7 +1084,8 @@ of the :meth:`.SessionEvents.after_commit` event which also emits before the sess = Session() - user = sess.query(User).filter_by(name='x').first() + user = sess.query(User).filter_by(name="x").first() + @event.listens_for(sess, "after_rollback") 
def after_rollback(session): @@ -1086,12 +1094,14 @@ of the :meth:`.SessionEvents.after_commit` event which also emits before the # to emit a lazy load. print("user name: %s" % user.name) + @event.listens_for(sess, "after_commit") def after_commit(session): # 'user.name' is present, assuming it was already # loaded. this is the existing behavior. print("user name: %s" % user.name) + if should_rollback: sess.rollback() else: @@ -1148,7 +1158,7 @@ In the case of assigning a collection to an attribute that would replace the previous collection, a side effect of this was that the collection being replaced would also be mutated, which is misleading and unnecessary:: - >>> a1, a2, a3 = Address('a1'), Address('a2'), Address('a3') + >>> a1, a2, a3 = Address("a1"), Address("a2"), Address("a3") >>> user.addresses = [a1, a2] >>> previous_collection = user.addresses @@ -1177,18 +1187,19 @@ existing collection. Given a mapping as:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B") - @validates('bs') + @validates("bs") def convert_dict_to_b(self, key, value): - return B(data=value['data']) + return B(data=value["data"]) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) data = Column(String) Above, we could use the validator as follows, to convert from an incoming @@ -1217,7 +1228,7 @@ are new. Supposing a simple validator such as:: class A(Base): # ... 
- @validates('bs') + @validates("bs") def validate_b(self, key, value): assert value.data is not None return value @@ -1255,16 +1266,16 @@ Use flag_dirty() to mark an object as "dirty" without any attribute changing An exception is now raised if the :func:`.attributes.flag_modified` function is used to mark an attribute as modified that isn't actually loaded:: - a1 = A(data='adf') + a1 = A(data="adf") s.add(a1) s.flush() # expire, similarly as though we said s.commit() - s.expire(a1, 'data') + s.expire(a1, "data") # will raise InvalidRequestError - attributes.flag_modified(a1, 'data') + attributes.flag_modified(a1, "data") This because the flush process will most likely fail in any case if the attribute remains un-present by the time flush occurs. To mark an object @@ -1287,6 +1298,7 @@ such as :meth:`.SessionEvents.before_flush`, use the new A very old and undocumented keyword argument ``scope`` has been removed:: from sqlalchemy.orm import scoped_session + Session = scoped_session(sessionmaker()) session = Session(scope=None) @@ -1312,18 +1324,21 @@ it is re-stated during the UPDATE so that the "onupdate" rule does not overwrite it:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) - favorite_b_id = Column(ForeignKey('b.id', name="favorite_b_fk")) + favorite_b_id = Column(ForeignKey("b.id", name="favorite_b_fk")) bs = relationship("B", primaryjoin="A.id == B.a_id") favorite_b = relationship( - "B", primaryjoin="A.favorite_b_id == B.id", post_update=True) + "B", primaryjoin="A.favorite_b_id == B.id", post_update=True + ) updated = Column(Integer, onupdate=my_onupdate_function) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id', name="a_fk")) + a_id = Column(ForeignKey("a.id", name="a_fk")) + a1 = A() b1 = B() @@ -1371,21 +1386,18 @@ now participates in the versioning feature, documented at Given a mapping:: class Node(Base): - 
__tablename__ = 'node' + __tablename__ = "node" id = Column(Integer, primary_key=True) version_id = Column(Integer, default=0) - parent_id = Column(ForeignKey('node.id')) - favorite_node_id = Column(ForeignKey('node.id')) + parent_id = Column(ForeignKey("node.id")) + favorite_node_id = Column(ForeignKey("node.id")) nodes = relationship("Node", primaryjoin=remote(parent_id) == id) favorite_node = relationship( - "Node", primaryjoin=favorite_node_id == remote(id), - post_update=True + "Node", primaryjoin=favorite_node_id == remote(id), post_update=True ) - __mapper_args__ = { - 'version_id_col': version_id - } + __mapper_args__ = {"version_id_col": version_id} An UPDATE of a node that associates another node as "favorite" will now increment the version counter as well as match the current version:: @@ -1435,20 +1447,20 @@ Whereas in 1.1, an expression such as the following would produce a result with no return type (assume ``-%>`` is some special operator supported by the database):: - >>> column('x', types.DateTime).op('-%>')(None).type + >>> column("x", types.DateTime).op("-%>")(None).type NullType() Other types would use the default behavior of using the left-hand type as the return type:: - >>> column('x', types.String(50)).op('-%>')(None).type + >>> column("x", types.String(50)).op("-%>")(None).type String(length=50) These behaviors were mostly by accident, so the behavior has been made consistent with the second form, that is the default return type is the same as the left-hand expression:: - >>> column('x', types.DateTime).op('-%>')(None).type + >>> column("x", types.DateTime).op("-%>")(None).type DateTime() As most user-defined operators tend to be "comparison" operators, often @@ -1457,18 +1469,18 @@ one of the many special operators defined by PostgreSQL, the its documented behavior of allowing the return type to be :class:`.Boolean` in all cases, including for :class:`_types.ARRAY` and :class:`_types.JSON`:: - >>> column('x', types.String(50)).op('-%>', 
is_comparison=True)(None).type + >>> column("x", types.String(50)).op("-%>", is_comparison=True)(None).type Boolean() - >>> column('x', types.ARRAY(types.Integer)).op('-%>', is_comparison=True)(None).type + >>> column("x", types.ARRAY(types.Integer)).op("-%>", is_comparison=True)(None).type Boolean() - >>> column('x', types.JSON()).op('-%>', is_comparison=True)(None).type + >>> column("x", types.JSON()).op("-%>", is_comparison=True)(None).type Boolean() To assist with boolean comparison operators, a new shorthand method :meth:`.Operators.bool_op` has been added. This method should be preferred for on-the-fly boolean operators:: - >>> print(column('x', types.Integer).bool_op('-%>')(5)) + >>> print(column("x", types.Integer).bool_op("-%>")(5)) x -%> :x_1 @@ -1485,7 +1497,7 @@ Previously, it was not possible to produce a :obj:`_expression.literal_column` construct that stated a single percent sign:: >>> from sqlalchemy import literal_column - >>> print(literal_column('some%symbol')) + >>> print(literal_column("some%symbol")) some%%symbol The percent sign is now unaffected for dialects that are not set to @@ -1494,10 +1506,10 @@ dialects which do state one of these paramstyles will continue to escape as is appropriate:: >>> from sqlalchemy import literal_column - >>> print(literal_column('some%symbol')) + >>> print(literal_column("some%symbol")) some%symbol >>> from sqlalchemy.dialects import mysql - >>> print(literal_column('some%symbol').compile(dialect=mysql.dialect())) + >>> print(literal_column("some%symbol").compile(dialect=mysql.dialect())) some%%symbol As part of this change, the doubling that has been present when using @@ -1517,8 +1529,9 @@ A bug in the :func:`_expression.collate` and :meth:`.ColumnOperators.collate` functions, used to supply ad-hoc column collations at the statement level, is fixed, where a case sensitive name would not be quoted:: - stmt = select([mytable.c.x, mytable.c.y]).\ - order_by(mytable.c.somecolumn.collate("fr_FR")) + stmt = 
select([mytable.c.x, mytable.c.y]).order_by( + mytable.c.somecolumn.collate("fr_FR") + ) now renders:: @@ -1553,8 +1566,8 @@ sets. The feature is off by default and can be enabled using the ``use_batch_mode`` argument on :func:`_sa.create_engine`:: engine = create_engine( - "postgresql+psycopg2://scott:tiger@host/dbname", - use_batch_mode=True) + "postgresql+psycopg2://scott:tiger@host/dbname", use_batch_mode=True + ) The feature is considered to be experimental for the moment but may become on by default in a future release. @@ -1577,10 +1590,7 @@ now allows these values to be specified:: from sqlalchemy.dialects.postgresql import INTERVAL - Table( - 'my_table', metadata, - Column("some_interval", INTERVAL(fields="DAY TO SECOND")) - ) + Table("my_table", metadata, Column("some_interval", INTERVAL(fields="DAY TO SECOND"))) Additionally, all INTERVAL datatypes can now be reflected independently of the "fields" specifier present; the "fields" parameter in the datatype @@ -1610,12 +1620,10 @@ This :class:`_expression.Insert` subclass adds a new method from sqlalchemy.dialects.mysql import insert - insert_stmt = insert(my_table). \ - values(id='some_id', data='some data to insert') + insert_stmt = insert(my_table).values(id="some_id", data="some data to insert") on_conflict_stmt = insert_stmt.on_duplicate_key_update( - data=insert_stmt.inserted.data, - status='U' + data=insert_stmt.inserted.data, status="U" ) conn.execute(on_conflict_stmt) @@ -1748,9 +1756,15 @@ name, rather than the raw UPPERCASE format that Oracle uses:: Previously, the foreign keys result would look like:: - [{'referred_table': u'users', 'referred_columns': [u'id'], - 'referred_schema': None, 'name': 'USER_ID_FK', - 'constrained_columns': [u'user_id']}] + [ + { + "referred_table": "users", + "referred_columns": ["id"], + "referred_schema": None, + "name": "USER_ID_FK", + "constrained_columns": ["user_id"], + } + ] Where the above could create problems particularly with Alembic autogenerate. 
@@ -1774,20 +1788,17 @@ now be passed using brackets to manually specify where this split occurs, allowing database and/or owner names that themselves contain one or more dots:: - Table( - "some_table", metadata, - Column("q", String(50)), - schema="[MyDataBase.dbo]" - ) + Table("some_table", metadata, Column("q", String(50)), schema="[MyDataBase.dbo]") The above table will consider the "owner" to be ``MyDataBase.dbo``, which will also be quoted upon render, and the "database" as None. To individually refer to database name and owner, use two pairs of brackets:: Table( - "some_table", metadata, + "some_table", + metadata, Column("q", String(50)), - schema="[MyDataBase.SomeDB].[MyDB.owner]" + schema="[MyDataBase.SomeDB].[MyDB.owner]", ) Additionally, the :class:`.quoted_name` construct is now honored when diff --git a/doc/build/changelog/migration_13.rst b/doc/build/changelog/migration_13.rst index f54bae329d0..a8197c6c62d 100644 --- a/doc/build/changelog/migration_13.rst +++ b/doc/build/changelog/migration_13.rst @@ -130,14 +130,17 @@ like:: j = join(B, D, D.b_id == B.id).join(C, C.id == D.c_id) B_viacd = mapper( - B, j, non_primary=True, primary_key=[j.c.b_id], + B, + j, + non_primary=True, + primary_key=[j.c.b_id], properties={ "id": j.c.b_id, # so that 'id' looks the same as before - "c_id": j.c.c_id, # needed for disambiguation + "c_id": j.c.c_id, # needed for disambiguation "d_c_id": j.c.d_c_id, # needed for disambiguation "b_id": [j.c.b_id, j.c.d_b_id], "d_id": j.c.d_id, - } + }, ) A.b = relationship(B_viacd, primaryjoin=A.b_id == B_viacd.c.b_id) @@ -185,14 +188,14 @@ of collections all in one query without using JOIN or subqueries at all. 
Given a mapping:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B", lazy="selectin") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) a_id = Column(ForeignKey("a.id")) @@ -349,7 +352,7 @@ where the ``del`` operation is roughly equivalent to setting the attribute to th some_object = session.query(SomeObject).get(5) - del some_object.some_attribute # from a SQL perspective, works like "= None" + del some_object.some_attribute # from a SQL perspective, works like "= None" :ticket:`4354` @@ -366,10 +369,9 @@ along with that object's full lifecycle in memory:: from sqlalchemy import inspect - u1 = User(id=7, name='ed') - - inspect(u1).info['user_info'] = '7|ed' + u1 = User(id=7, name="ed") + inspect(u1).info["user_info"] = "7|ed" :ticket:`4257` @@ -399,23 +401,22 @@ Association proxy has new cascade_scalar_deletes flag Given a mapping as:: class A(Base): - __tablename__ = 'test_a' + __tablename__ = "test_a" id = Column(Integer, primary_key=True) - ab = relationship( - 'AB', backref='a', uselist=False) + ab = relationship("AB", backref="a", uselist=False) b = association_proxy( - 'ab', 'b', creator=lambda b: AB(b=b), - cascade_scalar_deletes=True) + "ab", "b", creator=lambda b: AB(b=b), cascade_scalar_deletes=True + ) class B(Base): - __tablename__ = 'test_b' + __tablename__ = "test_b" id = Column(Integer, primary_key=True) - ab = relationship('AB', backref='b', cascade='all, delete-orphan') + ab = relationship("AB", backref="b", cascade="all, delete-orphan") class AB(Base): - __tablename__ = 'test_ab' + __tablename__ = "test_ab" a_id = Column(Integer, ForeignKey(A.id), primary_key=True) b_id = Column(Integer, ForeignKey(B.id), primary_key=True) @@ -490,7 +491,7 @@ to a class-specific :class:`.AssociationProxyInstance`, demonstrated as:: class User(Base): # ... 
- keywords = association_proxy('kws', 'keyword') + keywords = association_proxy("kws", "keyword") proxy_state = inspect(User).all_orm_descriptors["keywords"].for_class(User) @@ -522,6 +523,7 @@ and is **not** an object reference or another association proxy:: # column-based association proxy values = association_proxy("elements", "value") + class Element(Base): # ... @@ -530,7 +532,7 @@ and is **not** an object reference or another association proxy:: The ``User.values`` association proxy refers to the ``Element.value`` column. Standard column operations are now available, such as ``like``:: - >>> print(s.query(User).filter(User.values.like('%foo%'))) + >>> print(s.query(User).filter(User.values.like("%foo%"))) SELECT "user".id AS user_id FROM "user" WHERE EXISTS (SELECT 1 @@ -539,7 +541,7 @@ Standard column operations are now available, such as ``like``:: ``equals``:: - >>> print(s.query(User).filter(User.values == 'foo')) + >>> print(s.query(User).filter(User.values == "foo")) SELECT "user".id AS user_id FROM "user" WHERE EXISTS (SELECT 1 @@ -564,7 +566,7 @@ comparison operator; **this is a change in behavior** in that previously, the association proxy used ``.contains`` as a list containment operator only. With a column-oriented comparison, it now behaves like a "like":: - >>> print(s.query(User).filter(User.values.contains('foo'))) + >>> print(s.query(User).filter(User.values.contains("foo"))) SELECT "user".id AS user_id FROM "user" WHERE EXISTS (SELECT 1 @@ -579,7 +581,7 @@ When using an object-based association proxy with a collection, the behavior is as before, that of testing for collection membership, e.g. given a mapping:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) user_elements = relationship("UserElement") @@ -589,7 +591,7 @@ as before, that of testing for collection membership, e.g. 
given a mapping:: class UserElement(Base): - __tablename__ = 'user_element' + __tablename__ = "user_element" id = Column(Integer, primary_key=True) user_id = Column(ForeignKey("user.id")) @@ -598,7 +600,7 @@ as before, that of testing for collection membership, e.g. given a mapping:: class Element(Base): - __tablename__ = 'element' + __tablename__ = "element" id = Column(Integer, primary_key=True) value = Column(String) @@ -633,21 +635,21 @@ any use cases arise where it causes side effects. As an example, given a mapping with association proxy:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B") - b_data = association_proxy('bs', 'data') + b_data = association_proxy("bs", "data") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) a_id = Column(ForeignKey("a.id")) data = Column(String) - a1 = A(bs=[B(data='b1'), B(data='b2')]) + a1 = A(bs=[B(data="b1"), B(data="b2")]) b_data = a1.b_data @@ -671,7 +673,7 @@ Above, because the ``A`` object would be garbage collected before the The change is that the ``b_data`` collection is now maintaining a strong reference to the ``a1`` object, so that it remains present:: - assert b_data == ['b1', 'b2'] + assert b_data == ["b1", "b2"] This change introduces the side effect that if an application is passing around the collection as above, **the parent object won't be garbage collected** until @@ -699,7 +701,9 @@ new association objects where appropriate:: id = Column(Integer, primary_key=True) b_rel = relationship( - "B", collection_class=set, cascade="all, delete-orphan", + "B", + collection_class=set, + cascade="all, delete-orphan", ) b = association_proxy("b_rel", "value", creator=lambda x: B(value=x)) @@ -712,6 +716,7 @@ new association objects where appropriate:: a_id = Column(Integer, ForeignKey("test_a.id"), nullable=False) value = Column(String) + # ... 
s = Session(e) @@ -728,7 +733,6 @@ new association objects where appropriate:: # against the deleted ones. assert len(s.new) == 1 - :ticket:`2642` .. _change_1103: @@ -749,14 +753,14 @@ having a duplicate temporarily present in the list is intrinsic to a Python "swap" operation. Given a standard one-to-many/many-to-one setup:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B", backref="a") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) a_id = Column(ForeignKey("a.id")) @@ -780,7 +784,7 @@ during the flush. The same issue can be demonstrated using plain duplicates:: >>> del a1.bs[1] >>> a1.bs # collection is unaffected so far... [<__main__.B object at 0x7f047af5fb70>] - >>> b1.a # however b1.a is None + >>> b1.a # however b1.a is None >>> >>> session.add(a1) >>> session.commit() # so upon flush + expire.... @@ -955,21 +959,21 @@ been removed. Previously, this did not take place for one-to-many, or one-to-one relationships, in the following situation:: class User(Base): - __tablename__ = 'users' + __tablename__ = "users" id = Column(Integer, primary_key=True) - addresses = relationship( - "Address", - passive_deletes="all") + addresses = relationship("Address", passive_deletes="all") + class Address(Base): - __tablename__ = 'addresses' + __tablename__ = "addresses" id = Column(Integer, primary_key=True) email = Column(String) - user_id = Column(Integer, ForeignKey('users.id')) + user_id = Column(Integer, ForeignKey("users.id")) user = relationship("User") + u1 = session.query(User).first() address = u1.addresses[0] u1.addresses.remove(address) @@ -1006,16 +1010,17 @@ joined together either with no separator or with an underscore separator. 
Below we define a convention that will name :class:`.UniqueConstraint` constraints with a name that joins together the names of all columns:: - metadata_obj = MetaData(naming_convention={ - "uq": "uq_%(table_name)s_%(column_0_N_name)s" - }) + metadata_obj = MetaData( + naming_convention={"uq": "uq_%(table_name)s_%(column_0_N_name)s"} + ) table = Table( - 'info', metadata_obj, - Column('a', Integer), - Column('b', Integer), - Column('c', Integer), - UniqueConstraint('a', 'b', 'c') + "info", + metadata_obj, + Column("a", Integer), + Column("b", Integer), + Column("c", Integer), + UniqueConstraint("a", "b", "c"), ) The CREATE TABLE for the above table will render as:: @@ -1037,11 +1042,12 @@ PostgreSQL where identifiers cannot be longer than 63 characters, a long constraint name would normally be generated from the table definition below:: long_names = Table( - 'long_names', metadata_obj, - Column('information_channel_code', Integer, key='a'), - Column('billing_convention_name', Integer, key='b'), - Column('product_identifier', Integer, key='c'), - UniqueConstraint('a', 'b', 'c') + "long_names", + metadata_obj, + Column("information_channel_code", Integer, key="a"), + Column("billing_convention_name", Integer, key="b"), + Column("product_identifier", Integer, key="c"), + UniqueConstraint("a", "b", "c"), ) The truncation logic will ensure a too-long name isn't generated for the @@ -1137,17 +1143,16 @@ modifier to produce a :class:`.BinaryExpression` that has a "left" and a "right" side:: class Venue(Base): - __tablename__ = 'venue' + __tablename__ = "venue" id = Column(Integer, primary_key=True) name = Column(String) descendants = relationship( "Venue", - primaryjoin=func.instr( - remote(foreign(name)), name + "/" - ).as_comparison(1, 2) == 1, + primaryjoin=func.instr(remote(foreign(name)), name + "/").as_comparison(1, 2) + == 1, viewonly=True, - order_by=name + order_by=name, ) Above, the :paramref:`_orm.relationship.primaryjoin` of the "descendants" relationship @@ 
-1162,8 +1167,12 @@ lazyload to produce SQL like:: and a joinedload, such as:: - v1 = s.query(Venue).filter_by(name="parent1").options( - joinedload(Venue.descendants)).one() + v1 = ( + s.query(Venue) + .filter_by(name="parent1") + .options(joinedload(Venue.descendants)) + .one() + ) to work as:: @@ -1195,12 +1204,12 @@ backend, such as "SELECT CAST(NULL AS INTEGER) WHERE 1!=1" for PostgreSQL, >>> from sqlalchemy import select, literal_column, bindparam >>> e = create_engine("postgresql://scott:tiger@localhost/test", echo=True) >>> with e.connect() as conn: - ... conn.execute( - ... select([literal_column('1')]). - ... where(literal_column('1').in_(bindparam('q', expanding=True))), - ... q=[] - ... ) - ... + ... conn.execute( + ... select([literal_column("1")]).where( + ... literal_column("1").in_(bindparam("q", expanding=True)) + ... ), + ... q=[], + ... ) SELECT 1 WHERE 1 IN (SELECT CAST(NULL AS INTEGER) WHERE 1!=1) The feature also works for tuple-oriented IN statements, where the "empty IN" @@ -1211,12 +1220,12 @@ such as on PostgreSQL:: >>> from sqlalchemy import select, literal_column, tuple_, bindparam >>> e = create_engine("postgresql://scott:tiger@localhost/test", echo=True) >>> with e.connect() as conn: - ... conn.execute( - ... select([literal_column('1')]). - ... where(tuple_(50, "somestring").in_(bindparam('q', expanding=True))), - ... q=[] - ... ) - ... + ... conn.execute( + ... select([literal_column("1")]).where( + ... tuple_(50, "somestring").in_(bindparam("q", expanding=True)) + ... ), + ... q=[], + ... 
) SELECT 1 WHERE (%(param_1)s, %(param_2)s) IN (SELECT CAST(NULL AS INTEGER), CAST(NULL AS VARCHAR) WHERE 1!=1) @@ -1239,6 +1248,7 @@ variant expression in order to locate these methods:: from sqlalchemy import TypeDecorator, LargeBinary, func + class CompressedLargeBinary(TypeDecorator): impl = LargeBinary @@ -1248,13 +1258,15 @@ variant expression in order to locate these methods:: def column_expression(self, col): return func.uncompress(col, type_=self) + MyLargeBinary = LargeBinary().with_variant(CompressedLargeBinary(), "sqlite") The above expression will render a function within SQL when used on SQLite only:: from sqlalchemy import select, column from sqlalchemy.dialects import sqlite - print(select([column('x', CompressedLargeBinary)]).compile(dialect=sqlite.dialect())) + + print(select([column("x", CompressedLargeBinary)]).compile(dialect=sqlite.dialect())) will render:: @@ -1445,17 +1457,20 @@ queries used until now. Given a schema such as:: dv = Table( - 'data_values', metadata_obj, - Column('modulus', Integer, nullable=False), - Column('data', String(30)), - postgresql_partition_by='range(modulus)') + "data_values", + metadata_obj, + Column("modulus", Integer, nullable=False), + Column("data", String(30)), + postgresql_partition_by="range(modulus)", + ) sa.event.listen( dv, "after_create", sa.DDL( "CREATE TABLE data_values_4_10 PARTITION OF data_values " - "FOR VALUES FROM (4) TO (10)") + "FOR VALUES FROM (4) TO (10)" + ), ) The two table names ``'data_values'`` and ``'data_values_4_10'`` will come @@ -1492,9 +1507,7 @@ can now be explicitly ordered by passing a list of 2-tuples:: from sqlalchemy.dialects.mysql import insert - insert_stmt = insert(my_table).values( - id='some_existing_id', - data='inserted value') + insert_stmt = insert(my_table).values(id="some_existing_id", data="inserted value") on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update( [ @@ -1542,10 +1555,11 @@ keyword added to objects like :class:`.UniqueConstraint` as well as 
several :class:`_schema.Column` -specific variants:: some_table = Table( - 'some_table', metadata_obj, - Column('id', Integer, primary_key=True, sqlite_on_conflict_primary_key='FAIL'), - Column('data', Integer), - UniqueConstraint('id', 'data', sqlite_on_conflict='IGNORE') + "some_table", + metadata_obj, + Column("id", Integer, primary_key=True, sqlite_on_conflict_primary_key="FAIL"), + Column("data", Integer), + UniqueConstraint("id", "data", sqlite_on_conflict="IGNORE"), ) The above table would render in a CREATE TABLE statement as:: @@ -1651,7 +1665,8 @@ Pass it via :func:`_sa.create_engine`:: engine = create_engine( "mssql+pyodbc://scott:tiger@mssql2017:1433/test?driver=ODBC+Driver+13+for+SQL+Server", - fast_executemany=True) + fast_executemany=True, + ) .. seealso:: @@ -1678,12 +1693,16 @@ new ``mssql_identity_start`` and ``mssql_identity_increment`` parameters on :class:`_schema.Column`:: test = Table( - 'test', metadata_obj, + "test", + metadata_obj, Column( - 'id', Integer, primary_key=True, mssql_identity_start=100, - mssql_identity_increment=10 + "id", + Integer, + primary_key=True, + mssql_identity_start=100, + mssql_identity_increment=10, ), - Column('name', String(20)) + Column("name", String(20)), ) In order to emit ``IDENTITY`` on a non-primary key column, which is a little-used @@ -1693,9 +1712,10 @@ primary key column:: test = Table( - 'test', metadata_obj, - Column('id', Integer, primary_key=True, autoincrement=False), - Column('number', Integer, autoincrement=True) + "test", + metadata_obj, + Column("id", Integer, primary_key=True, autoincrement=False), + Column("number", Integer, autoincrement=True), ) .. 
seealso:: diff --git a/doc/build/changelog/migration_14.rst b/doc/build/changelog/migration_14.rst index b6cce48849d..089715bf6dd 100644 --- a/doc/build/changelog/migration_14.rst +++ b/doc/build/changelog/migration_14.rst @@ -70,9 +70,12 @@ to be used freely against ORM entities:: with Session(engine, future=True) as sess: - stmt = select(User).where( - User.name == 'sandy' - ).join(User.addresses).where(Address.email_address.like("%gmail%")) + stmt = ( + select(User) + .where(User.name == "sandy") + .join(User.addresses) + .where(Address.email_address.like("%gmail%")) + ) result = sess.execute(stmt) @@ -121,16 +124,19 @@ Similar adjustments have been made to "bulk updates and deletes" such that Core :func:`_sql.update` and :func:`_sql.delete` can be used for bulk operations. A bulk update like the following:: - session.query(User).filter(User.name == 'sandy').update({"password": "foobar"}, synchronize_session="fetch") + session.query(User).filter(User.name == "sandy").update( + {"password": "foobar"}, synchronize_session="fetch" + ) can now be achieved in :term:`2.0 style` (and indeed the above runs internally in this way) as follows:: with Session(engine, future=True) as sess: - stmt = update(User).where( - User.name == 'sandy' - ).values(password="foobar").execution_options( - synchronize_session="fetch" + stmt = ( + update(User) + .where(User.name == "sandy") + .values(password="foobar") + .execution_options(synchronize_session="fetch") ) sess.execute(stmt) @@ -676,7 +682,7 @@ that are in the columns clause of the SELECT statement. 
A common beginner mist is code such as the following:: stmt = select(users) - stmt = stmt.where(stmt.c.name == 'foo') + stmt = stmt.where(stmt.c.name == "foo") The above code appears intuitive and that it would generate "SELECT * FROM users WHERE name='foo'", however veteran SQLAlchemy users will @@ -688,8 +694,7 @@ the use case above, as in a case like the above it links directly to the columns present in the ``users.c`` collection:: stmt = select(users) - stmt = stmt.where(stmt.selected_columns.name == 'foo') - + stmt = stmt.where(stmt.selected_columns.name == "foo") :ticket:`4617` @@ -745,7 +750,9 @@ With the new implementation, :meth:`_sql.Select.join` and :meth:`_orm.Query.join`, adding JOIN criteria to the existing statement by matching to the left entity:: - stmt = select(user_table).join(addresses_table, user_table.c.id == addresses_table.c.user_id) + stmt = select(user_table).join( + addresses_table, user_table.c.id == addresses_table.c.user_id + ) producing:: @@ -839,7 +846,7 @@ returns a new :class:`_engine.URL` object with changes applied:: To alter the contents of the :attr:`_engine.URL.query` dictionary, methods such as :meth:`_engine.URL.update_query_dict` may be used:: - >>> url.update_query_dict({"sslcert": '/path/to/crt'}) + >>> url.update_query_dict({"sslcert": "/path/to/crt"}) postgresql://user:***@host/dbname?sslcert=%2Fpath%2Fto%2Fcrt To upgrade code that is mutating these fields directly, a **backwards and @@ -855,6 +862,7 @@ style:: some_url.drivername = some_drivername return some_url + def set_ssl_cert(some_url, ssl_cert): # check for 1.4 if hasattr(some_url, "update_query_dict"): @@ -869,7 +877,9 @@ to strings, using sequences of strings to represent multiple parameters. For example:: >>> from sqlalchemy.engine import make_url - >>> url = make_url("postgresql://user:pass@host/dbname?alt_host=host1&alt_host=host2&sslcert=%2Fpath%2Fto%2Fcrt") + >>> url = make_url( + ... 
"postgresql://user:pass@host/dbname?alt_host=host1&alt_host=host2&sslcert=%2Fpath%2Fto%2Fcrt" + ... ) >>> url.query immutabledict({'alt_host': ('host1', 'host2'), 'sslcert': '/path/to/crt'}) @@ -901,25 +911,24 @@ method. A backwards compatible approach would look like:: from sqlalchemy.engine import CreateEnginePlugin + class MyPlugin(CreateEnginePlugin): def __init__(self, url, kwargs): # check for 1.4 style if hasattr(CreateEnginePlugin, "update_url"): - self.my_argument_one = url.query['my_argument_one'] - self.my_argument_two = url.query['my_argument_two'] + self.my_argument_one = url.query["my_argument_one"] + self.my_argument_two = url.query["my_argument_two"] else: # legacy - self.my_argument_one = url.query.pop('my_argument_one') - self.my_argument_two = url.query.pop('my_argument_two') + self.my_argument_one = url.query.pop("my_argument_one") + self.my_argument_two = url.query.pop("my_argument_two") - self.my_argument_three = kwargs.pop('my_argument_three', None) + self.my_argument_three = kwargs.pop("my_argument_three", None) def update_url(self, url): # this method runs in 1.4 only and should be used to consume # plugin-specific arguments - return url.difference_update_query( - ["my_argument_one", "my_argument_two"] - ) + return url.difference_update_query(["my_argument_one", "my_argument_two"]) See the docstring at :class:`_engine.CreateEnginePlugin` for complete details on how this class is used. 
@@ -974,9 +983,9 @@ track for the old calling style:: stmt = select(users_table).where( case( - (users_table.c.name == 'wendy', 'W'), - (users_table.c.name == 'jack', 'J'), - else_='E' + (users_table.c.name == "wendy", "W"), + (users_table.c.name == "jack", "J"), + else_="E", ) ) @@ -1128,9 +1137,11 @@ not line up with these two tables will create an additional FROM entry:: address_alias = aliased(Address) - q = session.query(User).\ - join(address_alias, User.addresses).\ - filter(Address.email_address == 'foo') + q = ( + session.query(User) + .join(address_alias, User.addresses) + .filter(Address.email_address == "foo") + ) The above query selects from a JOIN of ``User`` and ``address_alias``, the latter of which is an alias of the ``Address`` entity. However, the @@ -1189,11 +1200,13 @@ JOIN clauses but also through the WHERE clause Above, we can add a WHERE clause to link the new ``Address`` entity with the previous ``address_alias`` entity and that will remove the warning:: - q = session.query(User).\ - join(address_alias, User.addresses).\ - filter(Address.email_address == 'foo').\ - filter(Address.id == address_alias.id) # resolve cartesian products, - # will no longer warn + q = ( + session.query(User) + .join(address_alias, User.addresses) + .filter(Address.email_address == "foo") + .filter(Address.id == address_alias.id) + ) # resolve cartesian products, + # will no longer warn The cartesian product warning considers **any** kind of link between two FROM clauses to be a resolution, even if the end result set is still @@ -1201,11 +1214,13 @@ wasteful, as the linter is intended only to detect the common case of a FROM clause that is completely unexpected. 
If the FROM clause is referred to explicitly elsewhere and linked to the other FROMs, no warning is emitted:: - q = session.query(User).\ - join(address_alias, User.addresses).\ - filter(Address.email_address == 'foo').\ - filter(Address.id > address_alias.id) # will generate a lot of rows, - # but no warning + q = ( + session.query(User) + .join(address_alias, User.addresses) + .filter(Address.email_address == "foo") + .filter(Address.id > address_alias.id) + ) # will generate a lot of rows, + # but no warning Full cartesian products are also allowed if they are explicitly stated; if we wanted for example the cartesian product of ``User`` and ``Address``, we can @@ -1256,7 +1271,6 @@ including methods such as: with engine.connect() as conn: row = conn.execute(table.select().where(table.c.id == 5)).one() - :meth:`_engine.Result.one_or_none` - same, but also returns None for no rows :meth:`_engine.Result.all` - returns all rows @@ -1278,12 +1292,12 @@ including methods such as: .. sourcecode:: with engine.connect() as conn: - # requests x, y, z - result = conn.execute(select(table.c.x, table.c.y, table.c.z)) + # requests x, y, z + result = conn.execute(select(table.c.x, table.c.y, table.c.z)) - # iterate rows as y, x - for y, x in result.columns("y", "x"): - print("Y: %s X: %s" % (y, x)) + # iterate rows as y, x + for y, x in result.columns("y", "x"): + print("Y: %s X: %s" % (y, x)) :meth:`_engine.Result.scalars` - returns lists of scalar objects, from the first column by default but can also be selected: @@ -1300,10 +1314,10 @@ dictionaries: .. 
sourcecode:: with engine.connect() as conn: - result = conn.execute(select(table.c.x, table.c.y, table.c.z)) + result = conn.execute(select(table.c.x, table.c.y, table.c.z)) - for map_ in result.mappings(): - print("Y: %(y)s X: %(x)s" % map_) + for map_ in result.mappings(): + print("Y: %(y)s X: %(x)s" % map_) When using Core, the object returned by :meth:`_engine.Connection.execute` is an instance of :class:`.CursorResult`, which continues to feature the same API @@ -1374,8 +1388,8 @@ can be summarized. Given a "named tuple" in pseudo code as:: The biggest cross-incompatible difference is the behavior of ``__contains__``:: - "id" in row # True for a mapping, False for a named tuple - "some name" in row # False for a mapping, True for a named tuple + "id" in row # True for a mapping, False for a named tuple + "some name" in row # False for a mapping, True for a named tuple In 1.4, when a :class:`.LegacyRow` is returned by a Core result set, the above ``"id" in row`` comparison will continue to succeed, however a deprecation @@ -1402,7 +1416,7 @@ when the row was first fetched. 
This means for example when retrieving a datetime value from SQLite, the data for the row as present in the :class:`.RowProxy` object would previously have looked like:: - row_proxy = (1, '2019-12-31 19:56:58.272106') + row_proxy = (1, "2019-12-31 19:56:58.272106") and then upon access via ``__getitem__``, the ``datetime.strptime()`` function would be used on the fly to convert the above string date into a ``datetime`` @@ -1478,8 +1492,8 @@ allows for greater cross-compatibility between the two, which is a key goal of the 2.0 transition:: >>> from sqlalchemy import column, select - >>> c1, c2, c3, c4 = column('c1'), column('c2'), column('c3'), column('c4') - >>> stmt = select(c1, c2, c3.label('c2'), c2, c4) + >>> c1, c2, c3, c4 = column("c1"), column("c2"), column("c3"), column("c4") + >>> stmt = select(c1, c2, c3.label("c2"), c2, c4) >>> print(stmt) SELECT c1, c2, c3 AS c2, c2, c4 @@ -1522,7 +1536,7 @@ does not imply deduplication of column objects, although it does imply deduplication of implicitly generated labels:: >>> from sqlalchemy import table - >>> user = table('user', column('id'), column('name')) + >>> user = table("user", column("id"), column("name")) >>> stmt = select(user.c.id, user.c.name, user.c.id).apply_labels() >>> print(stmt) SELECT "user".id AS user_id, "user".name AS user_name, "user".id AS id_1 @@ -1606,7 +1620,7 @@ prominently with CAST:: For CAST against expressions that don't have a name, the previous logic is used to generate the usual "anonymous" labels:: - >>> print(select(cast('hi there,' + foo.c.data, String))) + >>> print(select(cast("hi there," + foo.c.data, String))) SELECT CAST(:data_1 + foo.data AS VARCHAR) AS anon_1 FROM foo @@ -1614,14 +1628,14 @@ A :func:`.cast` against a :class:`.Label`, despite having to omit the label expression as these don't render inside of a CAST, will nonetheless make use of the given name:: - >>> print(select(cast(('hi there,' + foo.c.data).label('hello_data'), String))) + >>> print(select(cast(("hi 
there," + foo.c.data).label("hello_data"), String))) SELECT CAST(:data_1 + foo.data AS VARCHAR) AS hello_data FROM foo And of course as was always the case, :class:`.Label` can be applied to the expression on the outside to apply an "AS " label directly:: - >>> print(select(cast(('hi there,' + foo.c.data), String).label('hello_data'))) + >>> print(select(cast(("hi there," + foo.c.data), String).label("hello_data"))) SELECT CAST(:data_1 + foo.data AS VARCHAR) AS hello_data FROM foo @@ -1768,7 +1782,6 @@ flags to ``True``:: boolean = Column(Boolean(create_constraint=True)) enum = Column(Enum("a", "b", "c", create_constraint=True)) - :ticket:`5367` New Features - ORM @@ -1796,13 +1809,14 @@ To configure column-level raiseload on a mapping, the the attribute:: class Book(Base): - __tablename__ = 'book' + __tablename__ = "book" book_id = Column(Integer, primary_key=True) title = Column(String(200), nullable=False) summary = deferred(Column(String(2000)), raiseload=True) excerpt = deferred(Column(Text), raiseload=True) + book_w_excerpt = session.query(Book).options(undefer(Book.excerpt)).first() It was originally considered that the existing :func:`.raiseload` option that @@ -1810,8 +1824,7 @@ works for :func:`_orm.relationship` attributes be expanded to also support colum attributes. 
However, this would break the "wildcard" behavior of :func:`.raiseload`, which is documented as allowing one to prevent all relationships from loading:: - session.query(Order).options( - joinedload(Order.items), raiseload('*')) + session.query(Order).options(joinedload(Order.items), raiseload("*")) Above, if we had expanded :func:`.raiseload` to accommodate for columns as well, the wildcard would also prevent columns from loading and thus be a @@ -2003,11 +2016,7 @@ as entity / column should work:: row._mapping[u1] # same as row[0] - row = ( - s.query(User.id, Address.email_address) - .join(User.addresses) - .first() - ) + row = s.query(User.id, Address.email_address).join(User.addresses).first() row._mapping[User.id] # same as row[0] row._mapping["id"] # same as row[0] @@ -2202,13 +2211,11 @@ use of the :paramref:`_orm.Session.future` flag to :term:`2.0-style` mode:: Session = sessionmaker(engine, future=True) with Session() as session: - u1 = User() - session.add(u1) - - a1 = Address() - a1.user = u1 # <--- will not add "a1" to the Session - + u1 = User() + session.add(u1) + a1 = Address() + a1.user = u1 # <--- will not add "a1" to the Session :ticket:`5150` @@ -2225,7 +2232,7 @@ selectin/subquery loaders will run an "immediateload" operation for a given relationship, when an expired object is unexpired or an object is refreshed:: >>> a1 = session.query(A).options(joinedload(A.bs)).first() - >>> a1.data = 'new data' + >>> a1.data = "new data" >>> session.commit() Above, the ``A`` object was loaded with a ``joinedload()`` option associated @@ -2251,7 +2258,7 @@ a refresh scenario, which resembles the query emitted by "lazyload", emitted as an additional query:: >>> a1 = session.query(A).options(selectinload(A.bs)).first() - >>> a1.data = 'new data' + >>> a1.data = "new data" >>> session.commit() >>> a1.data SELECT a.id AS a_id, a.data AS a_data @@ -2333,9 +2340,11 @@ eventually identified in :ticket:`4519` where this empty collection could be harmful, which is 
when the object is merged into a session:: >>> u1 = User(id=1) # create an empty User to merge with id=1 in the database - >>> merged1 = session.merge(u1) # value of merged1.addresses is unchanged from that of the DB + >>> merged1 = session.merge( + ... u1 + ... ) # value of merged1.addresses is unchanged from that of the DB - >>> u2 = User(id=2) # create an empty User to merge with id=2 in the database + >>> u2 = User(id=2) # create an empty User to merge with id=2 in the database >>> u2.addresses [] >>> merged2 = session.merge(u2) # value of merged2.addresses has been emptied in the DB @@ -2364,7 +2373,9 @@ however is not added to ``__dict__`` until it is actually mutated:: >>> u1 = User() >>> l1 = u1.addresses # new list is created, associated with the state >>> assert u1.addresses is l1 # you get the same list each time you access it - >>> assert "addresses" not in u1.__dict__ # but it won't go into __dict__ until it's mutated + >>> assert ( + ... "addresses" not in u1.__dict__ + ... ) # but it won't go into __dict__ until it's mutated >>> from sqlalchemy import inspect >>> inspect(u1).attrs.addresses.history History(added=None, unchanged=None, deleted=None) @@ -2386,7 +2397,9 @@ the object contains certain values based on its ``__dict__``:: >>> u1.addresses [] # this will now fail, would pass before - >>> assert {k: v for k, v in u1.__dict__.items() if not k.startswith("_")} == {"addresses": []} + >>> assert {k: v for k, v in u1.__dict__.items() if not k.startswith("_")} == { + ... "addresses": [] + ... 
} or to ensure that the collection won't require a lazy load to proceed, the (admittedly awkward) code below will now also fail:: @@ -2415,10 +2428,11 @@ SQLAlchemy has always had logic to detect when an object in the :class:`.Session to be inserted has the same primary key as an object that is already present:: class Product(Base): - __tablename__ = 'product' + __tablename__ = "product" id = Column(Integer, primary_key=True) + session = Session(engine) # add Product with primary key 1 @@ -2500,8 +2514,7 @@ disallowed:: # ... # this is now an error - addresses = relationship( - "Address", viewonly=True, cascade="all, delete-orphan") + addresses = relationship("Address", viewonly=True, cascade="all, delete-orphan") The above will raise:: @@ -2542,10 +2555,7 @@ inheritance mapping:: s.commit() - print( - s.query(Manager).select_entity_from(s.query(Employee).subquery()).all() - ) - + print(s.query(Manager).select_entity_from(s.query(Employee).subquery()).all()) The subquery selects both the ``Engineer`` and the ``Manager`` rows, and even though the outer query is against ``Manager``, we get a non ``Manager`` @@ -2818,8 +2828,9 @@ effect. 
When "optional" is used on a :class:`.Sequence` that is present in the integer primary key column of a table:: Table( - "some_table", metadata, - Column("id", Integer, Sequence("some_seq", optional=True), primary_key=True) + "some_table", + metadata, + Column("id", Integer, Sequence("some_seq", optional=True), primary_key=True), ) The above :class:`.Sequence` is only used for DDL and INSERT statements if the diff --git a/doc/build/changelog/migration_20.rst b/doc/build/changelog/migration_20.rst index 626574cc061..105108434f6 100644 --- a/doc/build/changelog/migration_20.rst +++ b/doc/build/changelog/migration_20.rst @@ -234,7 +234,6 @@ as a bonus our program is much clearer:: print(result.fetchall()) - The goal of "2.0 deprecations mode" is that a program which runs with no :class:`_exc.RemovedIn20Warning` warnings with "2.0 deprecations mode" turned on is then ready to run in SQLAlchemy 2.0. @@ -262,24 +261,23 @@ the SQLAlchemy project itself, the approach taken is as follows: from sqlalchemy import exc # for warnings not included in regex-based filter below, just log - warnings.filterwarnings( - "always", category=exc.RemovedIn20Warning - ) + warnings.filterwarnings("always", category=exc.RemovedIn20Warning) # for warnings related to execute() / scalar(), raise for msg in [ r"The (?:Executable|Engine)\.(?:execute|scalar)\(\) function", - r"The current statement is being autocommitted using implicit " - "autocommit,", + r"The current statement is being autocommitted using implicit " "autocommit,", r"The connection.execute\(\) method in SQLAlchemy 2.0 will accept " "parameters as a single dictionary or a single sequence of " "dictionaries only.", r"The Connection.connect\(\) function/method is considered legacy", r".*DefaultGenerator.execute\(\)", ]: - warnings.filterwarnings( - "error", message=msg, category=exc.RemovedIn20Warning, - ) + warnings.filterwarnings( + "error", + message=msg, + category=exc.RemovedIn20Warning, + ) 3. 
As each sub-category of warnings are resolved in the application, new warnings that are caught by the "always" filter can be added to the list @@ -325,8 +323,6 @@ The new engine is described at :class:`_future.Engine` which delivers a new conn.commit() # commit as you go - - Migration to 2.0 Step Five - Use the ``future`` flag on Session --------------------------------------------------------------- @@ -360,6 +356,7 @@ in 1.4 which are now closely matched to the patterns used by the :class:`_orm.Session` may be used as a context manager:: from sqlalchemy.orm import Session + with Session(engine) as session: session.add(MyObject()) session.commit() @@ -405,7 +402,7 @@ the underlying DBAPI transaction, but in SQLAlchemy conn = engine.connect() # won't autocommit in 2.0 - conn.execute(some_table.insert().values(foo='bar')) + conn.execute(some_table.insert().values(foo="bar")) Nor will this autocommit:: @@ -421,10 +418,7 @@ execution option, will be removed:: conn = engine.connect() # won't autocommit in 2.0 - conn.execute( - text("EXEC my_procedural_thing()").execution_options(autocommit=True) - ) - + conn.execute(text("EXEC my_procedural_thing()").execution_options(autocommit=True)) **Migration to 2.0** @@ -433,13 +427,13 @@ style` execution is to make use of the :meth:`_engine.Connection.begin` method, or the :meth:`_engine.Engine.begin` context manager:: with engine.begin() as conn: - conn.execute(some_table.insert().values(foo='bar')) - conn.execute(some_other_table.insert().values(bat='hoho')) + conn.execute(some_table.insert().values(foo="bar")) + conn.execute(some_other_table.insert().values(bat="hoho")) with engine.connect() as conn: with conn.begin(): - conn.execute(some_table.insert().values(foo='bar')) - conn.execute(some_other_table.insert().values(bat='hoho')) + conn.execute(some_table.insert().values(foo="bar")) + conn.execute(some_other_table.insert().values(bat="hoho")) with engine.begin() as conn: conn.execute(text("EXEC my_procedural_thing()")) @@ 
-451,8 +445,8 @@ when a statement is first invoked in the absence of an explicit call to :meth:`_future.Connection.begin`:: with engine.connect() as conn: - conn.execute(some_table.insert().values(foo='bar')) - conn.execute(some_other_table.insert().values(bat='hoho')) + conn.execute(some_table.insert().values(foo="bar")) + conn.execute(some_other_table.insert().values(bat="hoho")) conn.commit() @@ -490,7 +484,7 @@ explicit as to how the transaction should be used. For the vast majority of Core use cases, it's the pattern that is already recommended:: with engine.begin() as conn: - conn.execute(some_table.insert().values(foo='bar')) + conn.execute(some_table.insert().values(foo="bar")) For "commit as you go, or rollback instead" usage, which resembles how the :class:`_orm.Session` is normally used today, the "future" version of @@ -508,7 +502,7 @@ a statement is first invoked:: engine = create_engine(..., future=True) with engine.connect() as conn: - conn.execute(some_table.insert().values(foo='bar')) + conn.execute(some_table.insert().values(foo="bar")) conn.commit() conn.execute(text("some other SQL")) @@ -558,11 +552,11 @@ execution patterns, is removed:: metadata_obj = MetaData(bind=engine) # no longer supported - metadata_obj.create_all() # requires Engine or Connection + metadata_obj.create_all() # requires Engine or Connection metadata_obj.reflect() # requires Engine or Connection - t = Table('t', metadata_obj, autoload=True) # use autoload_with=engine + t = Table("t", metadata_obj, autoload=True) # use autoload_with=engine result = engine.execute(t.select()) # no longer supported @@ -592,7 +586,7 @@ the ORM-level :meth:`_orm.Session.execute` method):: metadata_obj.reflect(engine) # reflect individual table - t = Table('t', metadata_obj, autoload_with=engine) + t = Table("t", metadata_obj, autoload_with=engine) # connection level: @@ -607,12 +601,11 @@ the ORM-level :meth:`_orm.Session.execute` method):: metadata_obj.reflect(connection) # reflect individual 
table - t = Table('t', metadata_obj, autoload_with=connection) + t = Table("t", metadata_obj, autoload_with=connection) # execute SQL statements result = conn.execute(t.select()) - **Discussion** @@ -736,7 +729,6 @@ in the case that the operation is a write operation:: with conn.begin(): result = conn.execute(stmt) - execute() method more strict, execution options are more prominent ------------------------------------------------------------------------------- @@ -756,18 +748,16 @@ require modification:: # positional parameters no longer supported, only named # unless using exec_driver_sql() - result = connection.execute(table.insert(), ('x', 'y', 'z')) + result = connection.execute(table.insert(), ("x", "y", "z")) # **kwargs no longer accepted, pass a single dictionary result = connection.execute(table.insert(), x=10, y=5) # multiple *args no longer accepted, pass a list result = connection.execute( - table.insert(), - {"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8} + table.insert(), {"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8} ) - **Migration to 2.0** The new :meth:`_future.Connection.execute` method now accepts a subset of the @@ -778,6 +768,7 @@ method, so the following code is cross-compatible between 1.x and 2.0:: connection = engine.connect() from sqlalchemy import text + result = connection.execute(text("select * from table")) # pass a single dictionary for single statement execution @@ -785,12 +776,9 @@ method, so the following code is cross-compatible between 1.x and 2.0:: # pass a list of dictionaries for executemany result = connection.execute( - table.insert(), - [{"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8}] + table.insert(), [{"x": 10, "y": 5}, {"x": 15, "y": 12}, {"x": 9, "y": 8}] ) - - **Discussion** The use of ``*args`` and ``**kwargs`` has been removed both to remove the @@ -832,11 +820,10 @@ tuples when using "future" mode:: row = result.first() # suppose the row is (1, 2) - "x" in row # evaluates to False, in 1.x 
/ future=False, this would be True + "x" in row # evaluates to False, in 1.x / future=False, this would be True 1 in row # evaluates to True, in 1.x / future=False, this would be False - **Migration to 2.0** Application code or test suites that are testing for a particular key @@ -881,10 +868,7 @@ or attribute:: stmt = select(User, Address).join(User.addresses) for row in session.execute(stmt).mappings(): - print("the user is: %s the address is: %s" % ( - row[User], - row[Address] - )) + print("the user is: %s the address is: %s" % (row[User], row[Address])) .. seealso:: @@ -921,14 +905,10 @@ now accepts its WHEN criteria positionally, rather than as a list:: # list emits a deprecation warning case_clause = case( - [ - (table.c.x == 5, "five"), - (table.c.x == 7, "seven") - ], - else_="neither five nor seven" + [(table.c.x == 5, "five"), (table.c.x == 7, "seven")], + else_="neither five nor seven", ) - **Migration to 2.0** Only the "generative" style of :func:`_sql.select` will be supported. The list @@ -951,9 +931,7 @@ is cross-compatible with 1.4 and 2.0:: # case conditions passed positionally case_clause = case( - (table.c.x == 5, "five"), - (table.c.x == 7, "seven"), - else_="neither five nor seven" + (table.c.x == 5, "five"), (table.c.x == 7, "seven"), else_="neither five nor seven" ) **Discussion** @@ -973,7 +951,7 @@ documented style in the Core tutorial. Examples of "structural" vs. 
"data" elements are as follows:: # table columns for CREATE TABLE - structural - table = Table("table", metadata_obj, Column('x', Integer), Column('y', Integer)) + table = Table("table", metadata_obj, Column("x", Integer), Column("y", Integer)) # columns in a SELECT statement - structural stmt = select(table.c.x, table.c.y) @@ -1006,10 +984,7 @@ constructor arguments to :func:`_sql.insert`, :func:`_sql.update` and stmt = table.delete(table.c.x > 15) # no longer supported - stmt = table.update( - table.c.x < 15, - preserve_parameter_order=True - ).values( + stmt = table.update(table.c.x < 15, preserve_parameter_order=True).values( [(table.c.y, 20), (table.c.x, table.c.y + 10)] ) @@ -1028,10 +1003,12 @@ examples:: stmt = table.delete().where(table.c.x > 15) # use generative methods, ordered_values() replaces preserve_parameter_order - stmt = table.update().where( - table.c.x < 15, - ).ordered_values( - (table.c.y, 20), (table.c.x, table.c.y + 10) + stmt = ( + table.update() + .where( + table.c.x < 15, + ) + .ordered_values((table.c.y, 20), (table.c.x, table.c.y + 10)) ) **Discussion** @@ -1102,9 +1079,7 @@ Code that works with classical mappings should change imports and code from:: from sqlalchemy.orm import mapper - mapper(SomeClass, some_table, properties={ - "related": relationship(SomeRelatedClass) - }) + mapper(SomeClass, some_table, properties={"related": relationship(SomeRelatedClass)}) To work from a central :class:`_orm.registry` object:: @@ -1112,9 +1087,9 @@ To work from a central :class:`_orm.registry` object:: mapper_reg = registry() - mapper_reg.map_imperatively(SomeClass, some_table, properties={ - "related": relationship(SomeRelatedClass) - }) + mapper_reg.map_imperatively( + SomeClass, some_table, properties={"related": relationship(SomeRelatedClass)} + ) The above :class:`_orm.registry` is also the source for declarative mappings, and classical mappings now have access to this registry including string-based @@ -1126,19 +1101,23 @@ configuration on 
:func:`_orm.relationship`:: Base = mapper_reg.generate_base() + class SomeRelatedClass(Base): - __tablename__ = 'related' + __tablename__ = "related" # ... - mapper_reg.map_imperatively(SomeClass, some_table, properties={ - "related": relationship( - "SomeRelatedClass", - primaryjoin="SomeRelatedClass.related_id == SomeClass.id" - ) - }) - + mapper_reg.map_imperatively( + SomeClass, + some_table, + properties={ + "related": relationship( + "SomeRelatedClass", + primaryjoin="SomeRelatedClass.related_id == SomeClass.id", + ) + }, + ) **Discussion** @@ -1203,9 +1182,7 @@ following the table, and may include additional notes not summarized here. - :: - session.execute( - select(User) - ).scalars().all() + session.execute(select(User)).scalars().all() # or session.scalars(select(User)).all() @@ -1216,15 +1193,11 @@ following the table, and may include additional notes not summarized here. * - :: - session.query(User).\ - filter_by(name='some user').one() + session.query(User).filter_by(name="some user").one() - :: - session.execute( - select(User). - filter_by(name="some user") - ).scalar_one() + session.execute(select(User).filter_by(name="some user")).scalar_one() - :ref:`migration_20_unify_select` @@ -1232,17 +1205,11 @@ following the table, and may include additional notes not summarized here. * - :: - session.query(User).\ - filter_by(name='some user').first() - + session.query(User).filter_by(name="some user").first() - :: - session.scalars( - select(User). - filter_by(name="some user"). - limit(1) - ).first() + session.scalars(select(User).filter_by(name="some user").limit(1)).first() - :ref:`migration_20_unify_select` @@ -1250,34 +1217,22 @@ following the table, and may include additional notes not summarized here. * - :: - session.query(User).options( - joinedload(User.addresses) - ).all() + session.query(User).options(joinedload(User.addresses)).all() - :: - session.scalars( - select(User). 
- options( - joinedload(User.addresses) - ) - ).unique().all() + session.scalars(select(User).options(joinedload(User.addresses))).unique().all() - :ref:`joinedload_not_uniqued` * - :: - session.query(User).\ - join(Address).\ - filter(Address.email == 'e@sa.us').\ - all() + session.query(User).join(Address).filter(Address.email == "e@sa.us").all() - :: session.execute( - select(User). - join(Address). - where(Address.email == 'e@sa.us') + select(User).join(Address).where(Address.email == "e@sa.us") ).scalars().all() - :ref:`migration_20_unify_select` @@ -1286,37 +1241,27 @@ following the table, and may include additional notes not summarized here. * - :: - session.query(User).from_statement( - text("select * from users") - ).all() + session.query(User).from_statement(text("select * from users")).all() - :: - session.scalars( - select(User). - from_statement( - text("select * from users") - ) - ).all() + session.scalars(select(User).from_statement(text("select * from users"))).all() - :ref:`orm_queryguide_selecting_text` * - :: - session.query(User).\ - join(User.addresses).\ - options( - contains_eager(User.addresses) - ).\ - populate_existing().all() + session.query(User).join(User.addresses).options( + contains_eager(User.addresses) + ).populate_existing().all() - :: session.execute( - select(User). - join(User.addresses). - options(contains_eager(User.addresses)). - execution_options(populate_existing=True) + select(User) + .join(User.addresses) + .options(contains_eager(User.addresses)) + .execution_options(populate_existing=True) ).scalars().all() - @@ -1328,21 +1273,17 @@ following the table, and may include additional notes not summarized here. * - :: - session.query(User).\ - filter(User.name == 'foo').\ - update( - {"fullname": "Foo Bar"}, - synchronize_session="evaluate" - ) - + session.query(User).filter(User.name == "foo").update( + {"fullname": "Foo Bar"}, synchronize_session="evaluate" + ) - :: session.execute( - update(User). 
- where(User.name == 'foo'). - values(fullname="Foo Bar"). - execution_options(synchronize_session="evaluate") + update(User) + .where(User.name == "foo") + .values(fullname="Foo Bar") + .execution_options(synchronize_session="evaluate") ) - :ref:`orm_expression_update_delete` @@ -1575,7 +1516,6 @@ will all be removed in 2.0:: # string use removed q = session.query(Address).filter(with_parent(u1, "addresses")) - **Migration to 2.0** Modern SQLAlchemy 1.x versions support the recommended technique which @@ -1622,7 +1562,6 @@ attributes in a list will be removed:: # chaining removed q = session.query(User).join("orders", "items", "keywords") - **Migration to 2.0** Use individual calls to :meth:`_orm.Query.join` for 1.x /2.0 cross compatible @@ -1671,11 +1610,13 @@ Use explicit aliases instead:: n1 = aliased(Node) n2 = aliased(Node) - q = select(Node).join(Node.children.of_type(n1)).\ - where(n1.name == "some sub child").\ - join(n1.children.of_type(n2)).\ - where(n2.name == "some sub child") - + q = ( + select(Node) + .join(Node.children.of_type(n1)) + .where(n1.name == "some sub child") + .join(n1.children.of_type(n2)) + .where(n2.name == "some sub child") + ) **Discussion** @@ -1714,8 +1655,13 @@ as well as "address.email_address" but only return User objects:: # 1.xx code - result = session.query(User).join(User.addresses).\ - distinct().order_by(Address.email_address).all() + result = ( + session.query(User) + .join(User.addresses) + .distinct() + .order_by(Address.email_address) + .all() + ) In version 2.0, the "email_address" column will not be automatically added to the columns clause, and the above query will fail, since relational @@ -1730,8 +1676,12 @@ returning the main entity object, and not the extra column, use the # 1.4 / 2.0 code - stmt = select(User, Address.email_address).join(User.addresses).\ - distinct().order_by(Address.email_address) + stmt = ( + select(User, Address.email_address) + .join(User.addresses) + .distinct() + 
.order_by(Address.email_address) + ) result = session.execute(stmt).columns(User).all() @@ -1758,10 +1708,12 @@ Selecting from the query itself as a subquery, e.g. "from_self()" The :meth:`_orm.Query.from_self` method will be removed from :class:`_orm.Query`:: # from_self is removed - q = session.query(User, Address.email_address).\ - join(User.addresses).\ - from_self(User).order_by(Address.email_address) - + q = ( + session.query(User, Address.email_address) + .join(User.addresses) + .from_self(User) + .order_by(Address.email_address) + ) **Migration to 2.0** @@ -1775,8 +1727,7 @@ since the final query wants to query in terms of both the ``User`` and from sqlalchemy.orm import aliased - subq = session.query(User, Address.email_address).\ - join(User.addresses).subquery() + subq = session.query(User, Address.email_address).join(User.addresses).subquery() ua = aliased(User, subq) @@ -1788,8 +1739,7 @@ The same form may be used in :term:`2.0 style`:: from sqlalchemy.orm import aliased - subq = select(User, Address.email_address).\ - join(User.addresses).subquery() + subq = select(User, Address.email_address).join(User.addresses).subquery() ua = aliased(User, subq) @@ -1799,7 +1749,6 @@ The same form may be used in :term:`2.0 style`:: result = session.execute(stmt) - **Discussion** The :meth:`_query.Query.from_self` method is a very complicated method that is rarely @@ -1832,8 +1781,7 @@ labeling:: # 1.4 / 2.0 code - subq = select(User, Address).\ - join(User.addresses).subquery() + subq = select(User, Address).join(User.addresses).subquery() ua = aliased(User, subq) aa = aliased(Address, subq) @@ -1923,9 +1871,7 @@ where the "joined eager loading" loader strategy is used with collections:: # In the new API, uniquing is available but not implicitly # enabled - result = session.execute( - select(User).options(joinedload(User.addresses)) - ) + result = session.execute(select(User).options(joinedload(User.addresses))) # this actually will raise an error to let the user 
know that # uniquing should be applied @@ -1994,16 +1940,15 @@ to achieve 2.0 style querying that's in terms of a specific relationship: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" posts = relationship(Post, lazy="dynamic") + jack = session.get(User, 5) # filter Jack's blog posts - posts = session.scalars( - jack.posts.statement.where(Post.headline == "this is a post") - ) + posts = session.scalars(jack.posts.statement.where(Post.headline == "this is a post")) * Use the :func:`_orm.with_parent` function to construct a :func:`_sql.select` construct directly:: @@ -2013,9 +1958,9 @@ to achieve 2.0 style querying that's in terms of a specific relationship: jack = session.get(User, 5) posts = session.scalars( - select(Post). - where(with_parent(jack, User.posts)). - where(Post.headline == "this is a post") + select(Post) + .where(with_parent(jack, User.posts)) + .where(Post.headline == "this is a post") ) **Discussion** @@ -2050,7 +1995,6 @@ is, this pattern:: # commits, won't be supported sess.flush() - **Migration to 2.0** The main reason a :class:`_orm.Session` is used in "autocommit" mode @@ -2066,7 +2010,7 @@ be called:: sess = Session(engine) sess.begin() # begin explicitly; if not called, will autobegin - # when database access is needed + # when database access is needed sess.add(obj) @@ -2104,6 +2048,7 @@ a decorator may be used:: import contextlib + @contextlib.contextmanager def transaction(session): if not session.in_transaction(): @@ -2112,7 +2057,6 @@ a decorator may be used:: else: yield - The above context manager may be used in the same way the "subtransaction" flag works, such as in the following example:: @@ -2122,12 +2066,14 @@ The above context manager may be used in the same way the with transaction(session): method_b(session) + # method_b also starts a transaction, but when # called from method_a participates in the ongoing # transaction. 
def method_b(session): with transaction(session): - session.add(SomeObject('bat', 'lala')) + session.add(SomeObject("bat", "lala")) + Session = sessionmaker(engine) @@ -2142,8 +2088,10 @@ or methods to be concerned with the details of transaction demarcation:: def method_a(session): method_b(session) + def method_b(session): - session.add(SomeObject('bat', 'lala')) + session.add(SomeObject("bat", "lala")) + Session = sessionmaker(engine) diff --git a/doc/build/changelog/unreleased_14/8525.rst b/doc/build/changelog/unreleased_14/8525.rst index 3031ec378c5..8508e396b47 100644 --- a/doc/build/changelog/unreleased_14/8525.rst +++ b/doc/build/changelog/unreleased_14/8525.rst @@ -7,4 +7,4 @@ Database via Azure Active Directory", which apparently lacks the ``system_views`` view entirely. Error catching has been extended that under no circumstances will this method ever fail, provided database connectivity - is present. \ No newline at end of file + is present. diff --git a/doc/build/changelog/unreleased_14/8569.rst b/doc/build/changelog/unreleased_14/8569.rst index fc3b3f73986..5ae6fce091c 100644 --- a/doc/build/changelog/unreleased_14/8569.rst +++ b/doc/build/changelog/unreleased_14/8569.rst @@ -10,4 +10,4 @@ combinations of SQL label names and aliasing. This "wrapping" is not appropriate for :func:`_orm.contains_eager` which has always had the contract that the user-defined SQL statement is unmodified with the - exception of adding the appropriate columns to be fetched. \ No newline at end of file + exception of adding the appropriate columns to be fetched. 
diff --git a/doc/build/core/connections.rst b/doc/build/core/connections.rst index 5228235e73f..9481d9d4e41 100644 --- a/doc/build/core/connections.rst +++ b/doc/build/core/connections.rst @@ -21,7 +21,7 @@ Basic Usage Recall from :doc:`/core/engines` that an :class:`_engine.Engine` is created via the :func:`_sa.create_engine` call:: - engine = create_engine('mysql://scott:tiger@localhost/test') + engine = create_engine("mysql://scott:tiger@localhost/test") The typical usage of :func:`_sa.create_engine` is once per particular database URL, held globally for the lifetime of a single application process. A single @@ -48,7 +48,7 @@ a textual statement to the database looks like:: with engine.connect() as connection: result = connection.execute(text("select username from users")) for row in result: - print("username:", row['username']) + print("username:", row["username"]) Above, the :meth:`_engine.Engine.connect` method returns a :class:`_engine.Connection` object, and by using it in a Python context manager (e.g. 
the ``with:`` @@ -146,13 +146,15 @@ issue a transaction on a :class:`_engine.Connection`, but only the outermost with connection.begin(): # open a transaction method_b(connection) + # method_b also starts a transaction def method_b(connection): - with connection.begin(): # open a transaction - this runs in the - # context of method_a's transaction + with connection.begin(): # open a transaction - this runs in the + # context of method_a's transaction connection.execute(text("insert into mytable values ('bat', 'lala')")) connection.execute(mytable.insert(), {"col1": "bat", "col2": "lala"}) + # open a Connection and call method_a with engine.connect() as conn: method_a(conn) @@ -187,12 +189,14 @@ adapt the example from the previous section to this practice looks like:: def method_a(connection): method_b(connection) + # method_b uses the connection and assumes the transaction # is external def method_b(connection): connection.execute(text("insert into mytable values ('bat', 'lala')")) connection.execute(mytable.insert(), {"col1": "bat", "col2": "lala"}) + # open a Connection inside of a transaction and call method_a with engine.begin() as conn: method_a(conn) @@ -227,6 +231,7 @@ a decorator may be used:: import contextlib + @contextlib.contextmanager def transaction(connection): if not connection.in_transaction(): @@ -242,6 +247,7 @@ The above contextmanager would be used as:: with transaction(connection): # open a transaction method_b(connection) + # method_b either starts a transaction, or uses the one already # present def method_b(connection): @@ -249,6 +255,7 @@ The above contextmanager would be used as:: connection.execute(text("insert into mytable values ('bat', 'lala')")) connection.execute(mytable.insert(), {"col1": "bat", "col2": "lala"}) + # open a Connection and call method_a with engine.connect() as conn: method_a(conn) @@ -260,6 +267,7 @@ present:: import contextlib + def connectivity(engine): connection = None @@ -285,6 +293,7 @@ Using the above would 
look like:: with connectivity(): method_b(connectivity) + # method_b also wants to use a connection from the context, so it # also calls "with:", but also it actually uses the connection. def method_b(connectivity): @@ -292,6 +301,7 @@ Using the above would look like:: connection.execute(text("insert into mytable values ('bat', 'lala')")) connection.execute(mytable.insert(), {"col1": "bat", "col2": "lala"}) + # create a new connection/transaction context object and call # method_a method_a(connectivity(engine)) @@ -438,9 +448,7 @@ parameter to :func:`_sa.create_engine`:: eng = create_engine( "postgresql://scott:tiger@localhost/test", - execution_options={ - "isolation_level": "REPEATABLE READ" - } + execution_options={"isolation_level": "REPEATABLE READ"}, ) With the above setting, the DBAPI connection will be set to use a @@ -461,7 +469,6 @@ separated off from the main engine:: autocommit_engine = eng.execution_options(isolation_level="AUTOCOMMIT") - Above, the :meth:`_engine.Engine.execution_options` method creates a shallow copy of the original :class:`_engine.Engine`. Both ``eng`` and ``autocommit_engine`` share the same dialect and connection pool. However, the @@ -726,11 +733,7 @@ combination has includes: These three behaviors are illustrated in the example below:: with engine.connect() as conn: - result = ( - conn. - execution_options(yield_per=100). - execute(text("select * from table")) - ) + result = conn.execution_options(yield_per=100).execute(text("select * from table")) for partition in result.partitions(): # partition is an iterable that will be at most 100 items @@ -818,7 +821,7 @@ which is not a :class:`_engine.Connection`. 
This was illustrated using the result = engine.execute(text("select username from users")) for row in result: - print("username:", row['username']) + print("username:", row["username"]) In addition to "connectionless" execution, it is also possible to use the :meth:`~.Executable.execute` method of @@ -832,9 +835,11 @@ Given a table as below:: from sqlalchemy import MetaData, Table, Column, Integer metadata_obj = MetaData() - users_table = Table('users', metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String(50)) + users_table = Table( + "users", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("name", String(50)), ) Explicit execution delivers the SQL text or constructed SQL expression to the @@ -948,9 +953,10 @@ to render under different schema names without any changes. Given a table:: user_table = Table( - 'user', metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String(50)) + "user", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("name", String(50)), ) The "schema" of this :class:`_schema.Table` as defined by the @@ -960,7 +966,8 @@ that all :class:`_schema.Table` objects with a schema of ``None`` would instead render the schema as ``user_schema_one``:: connection = engine.connect().execution_options( - schema_translate_map={None: "user_schema_one"}) + schema_translate_map={None: "user_schema_one"} + ) result = connection.execute(user_table.select()) @@ -974,10 +981,11 @@ map can specify any number of target->destination schemas:: connection = engine.connect().execution_options( schema_translate_map={ - None: "user_schema_one", # no schema name -> "user_schema_one" - "special": "special_schema", # schema="special" becomes "special_schema" - "public": None # Table objects with schema="public" will render with no schema - }) + None: "user_schema_one", # no schema name -> "user_schema_one" + "special": "special_schema", # schema="special" becomes "special_schema" + "public": 
None, # Table objects with schema="public" will render with no schema + } + ) The :paramref:`.Connection.execution_options.schema_translate_map` parameter affects all DDL and SQL constructs generated from the SQL expression language, @@ -1002,7 +1010,7 @@ as the schema name is passed to these methods explicitly. to the :class:`_orm.Session`. The :class:`_orm.Session` uses a new :class:`_engine.Connection` for each transaction:: - schema_engine = engine.execution_options(schema_translate_map = { ... } ) + schema_engine = engine.execution_options(schema_translate_map={...}) session = Session(schema_engine) @@ -1148,9 +1156,7 @@ As an example, we will examine the logging produced by the following program:: s = Session(e) - s.add_all( - [A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()])] - ) + s.add_all([A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()]), A(bs=[B(), B(), B()])]) s.commit() for a_rec in s.query(A): @@ -1401,6 +1407,7 @@ a SQL string directly, dialect authors can apply the attribute as follows:: from sqlalchemy.engine.default import DefaultDialect + class MyDialect(DefaultDialect): supports_statement_cache = True @@ -1426,9 +1433,9 @@ like this:: def limit_clause(self, select, **kw): text = "" if select._limit is not None: - text += " \n LIMIT %d" % (select._limit, ) + text += " \n LIMIT %d" % (select._limit,) if select._offset is not None: - text += " \n OFFSET %d" % (select._offset, ) + text += " \n OFFSET %d" % (select._offset,) return text The above routine renders the :attr:`.Select._limit` and @@ -1546,6 +1553,7 @@ approach:: from sqlalchemy import lambda_stmt + def run_my_statement(connection, parameter): stmt = lambda_stmt(lambda: select(table)) stmt += lambda s: s.where(table.c.col == parameter) @@ -1553,6 +1561,7 @@ approach:: return connection.execute(stmt) + with engine.connect() as conn: result = run_my_statement(some_connection, "some parameter") @@ -1588,9 +1597,10 @@ Basic guidelines include: def upd(id_, newname): stmt = 
lambda_stmt(lambda: users.update()) stmt += lambda s: s.values(name=newname) - stmt += lambda s: s.where(users.c.id==id_) + stmt += lambda s: s.where(users.c.id == id_) return stmt + with engine.begin() as conn: conn.execute(upd(7, "foo")) @@ -1621,12 +1631,10 @@ Basic guidelines include: >>> def my_stmt(x, y): ... stmt = lambda_stmt(lambda: select(func.max(x, y))) ... return stmt - ... >>> engine = create_engine("sqlite://", echo=True) >>> with engine.connect() as conn: ... print(conn.scalar(my_stmt(5, 10))) ... print(conn.scalar(my_stmt(12, 8))) - ... {opensql}SELECT max(?, ?) AS max_1 [generated in 0.00057s] (5, 10){stop} 10 @@ -1677,15 +1685,14 @@ Basic guidelines include: >>> def my_stmt(x, y): ... def get_x(): ... return x + ... ... def get_y(): ... return y ... ... stmt = lambda_stmt(lambda: select(func.max(get_x(), get_y()))) ... return stmt - ... >>> with engine.connect() as conn: ... print(conn.scalar(my_stmt(5, 10))) - ... Traceback (most recent call last): # ... sqlalchemy.exc.InvalidRequestError: Can't invoke Python callable get_x() @@ -1701,6 +1708,7 @@ Basic guidelines include: >>> def my_stmt(x, y): ... def get_x(): ... return x + ... ... def get_y(): ... return y ... @@ -1722,14 +1730,11 @@ Basic guidelines include: ... def __init__(self, x, y): ... self.x = x ... self.y = y - ... >>> def my_stmt(foo): ... stmt = lambda_stmt(lambda: select(func.max(foo.x, foo.y))) ... return stmt - ... >>> with engine.connect() as conn: - ... print(conn.scalar(my_stmt(Foo(5, 10)))) - ... + ... print(conn.scalar(my_stmt(Foo(5, 10)))) Traceback (most recent call last): # ... sqlalchemy.exc.InvalidRequestError: Closure variable named 'foo' inside of @@ -1766,8 +1771,7 @@ Basic guidelines include: >>> def my_stmt(foo): ... stmt = lambda_stmt( - ... lambda: select(func.max(foo.x, foo.y)), - ... track_closure_variables=False + ... lambda: select(func.max(foo.x, foo.y)), track_closure_variables=False ... ) ... 
return stmt @@ -1783,13 +1787,9 @@ Basic guidelines include: >>> def my_stmt(self, foo): ... stmt = lambda_stmt( - ... lambda: select(*self.column_expressions), - ... track_closure_variables=False - ... ) - ... stmt = stmt.add_criteria( - ... lambda: self.where_criteria, - ... track_on=[self] + ... lambda: select(*self.column_expressions), track_closure_variables=False ... ) + ... stmt = stmt.add_criteria(lambda: self.where_criteria, track_on=[self]) ... return stmt Using ``track_on`` means the given objects will be stored long term in the @@ -1812,7 +1812,7 @@ SQL expression construct by producing a structure that represents all the state within the construct:: >>> from sqlalchemy import select, column - >>> stmt = select(column('q')) + >>> stmt = select(column("q")) >>> cache_key = stmt._generate_cache_key() >>> print(cache_key) # somewhat paraphrased CacheKey(key=( @@ -2028,7 +2028,6 @@ method may be used:: with engine.connect() as conn: conn.exec_driver_sql("SET param='bar'") - .. versionadded:: 1.4 Added the :meth:`_engine.Connection.exec_driver_sql` method. .. _dbapi_connections_cursor: @@ -2105,7 +2104,7 @@ may potentially be used with your DBAPI. An example of this pattern is:: connection = engine.raw_connection() try: cursor_obj = connection.cursor() - cursor_obj.callproc("my_procedure", ['x', 'y', 'z']) + cursor_obj.callproc("my_procedure", ["x", "y", "z"]) results = list(cursor_obj.fetchall()) cursor_obj.close() connection.commit() @@ -2151,8 +2150,6 @@ Multiple result set support is available from a raw DBAPI cursor using the finally: connection.close() - - Registering New Dialects ======================== @@ -2168,7 +2165,7 @@ to create a new dialect "foodialect://", the steps are as follows: via ``foodialect.dialect``. 3. 
The entry point can be established in setup.py as follows:: - entry_points=""" + entry_points = """ [sqlalchemy.dialects] foodialect = foodialect.dialect:FooDialect """ @@ -2178,7 +2175,7 @@ an existing SQLAlchemy-supported database, the name can be given including a database-qualification. For example, if ``FooDialect`` were in fact a MySQL dialect, the entry point could be established like this:: - entry_points=""" + entry_points = """ [sqlalchemy.dialects] mysql.foodialect = foodialect.dialect:FooDialect """ @@ -2192,6 +2189,7 @@ SQLAlchemy also allows a dialect to be registered within the current process, by the need for separate installation. Use the ``register()`` function as follows:: from sqlalchemy.dialects import registry + registry.register("mysql.foodialect", "myapp.dialect", "MyMySQLDialect") The above will respond to ``create_engine("mysql+foodialect://")`` and load the diff --git a/doc/build/core/constraints.rst b/doc/build/core/constraints.rst index 038c3134dd1..aa322238f58 100644 --- a/doc/build/core/constraints.rst +++ b/doc/build/core/constraints.rst @@ -33,11 +33,13 @@ column. 
The single column foreign key is more common, and at the column level is specified by constructing a :class:`~sqlalchemy.schema.ForeignKey` object as an argument to a :class:`~sqlalchemy.schema.Column` object:: - user_preference = Table('user_preference', metadata_obj, - Column('pref_id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False), - Column('pref_name', String(40), nullable=False), - Column('pref_value', String(100)) + user_preference = Table( + "user_preference", + metadata_obj, + Column("pref_id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.user_id"), nullable=False), + Column("pref_name", String(40), nullable=False), + Column("pref_value", String(100)), ) Above, we define a new table ``user_preference`` for which each row must @@ -64,21 +66,27 @@ known as a *composite* foreign key, and almost always references a table that has a composite primary key. Below we define a table ``invoice`` which has a composite primary key:: - invoice = Table('invoice', metadata_obj, - Column('invoice_id', Integer, primary_key=True), - Column('ref_num', Integer, primary_key=True), - Column('description', String(60), nullable=False) + invoice = Table( + "invoice", + metadata_obj, + Column("invoice_id", Integer, primary_key=True), + Column("ref_num", Integer, primary_key=True), + Column("description", String(60), nullable=False), ) And then a table ``invoice_item`` with a composite foreign key referencing ``invoice``:: - invoice_item = Table('invoice_item', metadata_obj, - Column('item_id', Integer, primary_key=True), - Column('item_name', String(60), nullable=False), - Column('invoice_id', Integer, nullable=False), - Column('ref_num', Integer, nullable=False), - ForeignKeyConstraint(['invoice_id', 'ref_num'], ['invoice.invoice_id', 'invoice.ref_num']) + invoice_item = Table( + "invoice_item", + metadata_obj, + Column("item_id", Integer, primary_key=True), + Column("item_name", String(60), 
nullable=False), + Column("invoice_id", Integer, nullable=False), + Column("ref_num", Integer, nullable=False), + ForeignKeyConstraint( + ["invoice_id", "ref_num"], ["invoice.invoice_id", "invoice.ref_num"] + ), ) It's important to note that the @@ -126,22 +134,20 @@ statements, on all backends other than SQLite which does not support most forms of ALTER. Given a schema like:: node = Table( - 'node', metadata_obj, - Column('node_id', Integer, primary_key=True), - Column( - 'primary_element', Integer, - ForeignKey('element.element_id') - ) + "node", + metadata_obj, + Column("node_id", Integer, primary_key=True), + Column("primary_element", Integer, ForeignKey("element.element_id")), ) element = Table( - 'element', metadata_obj, - Column('element_id', Integer, primary_key=True), - Column('parent_node_id', Integer), + "element", + metadata_obj, + Column("element_id", Integer, primary_key=True), + Column("parent_node_id", Integer), ForeignKeyConstraint( - ['parent_node_id'], ['node.node_id'], - name='fk_element_parent_node_id' - ) + ["parent_node_id"], ["node.node_id"], name="fk_element_parent_node_id" + ), ) When we call upon :meth:`_schema.MetaData.create_all` on a backend such as the @@ -151,7 +157,7 @@ constraints are created separately: .. sourcecode:: pycon+sql >>> with engine.connect() as conn: - ... metadata_obj.create_all(conn, checkfirst=False) + ... metadata_obj.create_all(conn, checkfirst=False) {opensql}CREATE TABLE element ( element_id SERIAL NOT NULL, parent_node_id INTEGER, @@ -179,7 +185,7 @@ those constraints that are named: .. sourcecode:: pycon+sql >>> with engine.connect() as conn: - ... metadata_obj.drop_all(conn, checkfirst=False) + ... metadata_obj.drop_all(conn, checkfirst=False) {opensql}ALTER TABLE element DROP CONSTRAINT fk_element_parent_node_id DROP TABLE node DROP TABLE element @@ -205,13 +211,16 @@ to manually resolve dependency cycles. 
We can add this flag only to the ``'element'`` table as follows:: element = Table( - 'element', metadata_obj, - Column('element_id', Integer, primary_key=True), - Column('parent_node_id', Integer), + "element", + metadata_obj, + Column("element_id", Integer, primary_key=True), + Column("parent_node_id", Integer), ForeignKeyConstraint( - ['parent_node_id'], ['node.node_id'], - use_alter=True, name='fk_element_parent_node_id' - ) + ["parent_node_id"], + ["node.node_id"], + use_alter=True, + name="fk_element_parent_node_id", + ), ) in our CREATE DDL we will see the ALTER statement only for this constraint, @@ -220,7 +229,7 @@ and not the other one: .. sourcecode:: pycon+sql >>> with engine.connect() as conn: - ... metadata_obj.create_all(conn, checkfirst=False) + ... metadata_obj.create_all(conn, checkfirst=False) {opensql}CREATE TABLE element ( element_id SERIAL NOT NULL, parent_node_id INTEGER, @@ -282,22 +291,29 @@ generation of this clause via the ``onupdate`` and ``ondelete`` keyword arguments. 
The value is any string which will be output after the appropriate "ON UPDATE" or "ON DELETE" phrase:: - child = Table('child', metadata_obj, - Column('id', Integer, - ForeignKey('parent.id', onupdate="CASCADE", ondelete="CASCADE"), - primary_key=True - ) - ) - - composite = Table('composite', metadata_obj, - Column('id', Integer, primary_key=True), - Column('rev_id', Integer), - Column('note_id', Integer), + child = Table( + "child", + metadata_obj, + Column( + "id", + Integer, + ForeignKey("parent.id", onupdate="CASCADE", ondelete="CASCADE"), + primary_key=True, + ), + ) + + composite = Table( + "composite", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("rev_id", Integer), + Column("note_id", Integer), ForeignKeyConstraint( - ['rev_id', 'note_id'], - ['revisions.id', 'revisions.note_id'], - onupdate="CASCADE", ondelete="SET NULL" - ) + ["rev_id", "note_id"], + ["revisions.id", "revisions.note_id"], + onupdate="CASCADE", + ondelete="SET NULL", + ), ) Note that these clauses require ``InnoDB`` tables when used with MySQL. @@ -327,17 +343,16 @@ unique constraints and/or those with multiple columns are created via the from sqlalchemy import UniqueConstraint metadata_obj = MetaData() - mytable = Table('mytable', metadata_obj, - + mytable = Table( + "mytable", + metadata_obj, # per-column anonymous unique constraint - Column('col1', Integer, unique=True), - - Column('col2', Integer), - Column('col3', Integer), - + Column("col1", Integer, unique=True), + Column("col2", Integer), + Column("col3", Integer), # explicit/composite unique constraint. 'name' is optional. - UniqueConstraint('col2', 'col3', name='uix_1') - ) + UniqueConstraint("col2", "col3", name="uix_1"), + ) CHECK Constraint ---------------- @@ -357,17 +372,16 @@ MySQL. 
from sqlalchemy import CheckConstraint metadata_obj = MetaData() - mytable = Table('mytable', metadata_obj, - + mytable = Table( + "mytable", + metadata_obj, # per-column CHECK constraint - Column('col1', Integer, CheckConstraint('col1>5')), - - Column('col2', Integer), - Column('col3', Integer), - + Column("col1", Integer, CheckConstraint("col1>5")), + Column("col2", Integer), + Column("col3", Integer), # table level CHECK constraint. 'name' is optional. - CheckConstraint('col2 > col3 + 5', name='check1') - ) + CheckConstraint("col2 > col3 + 5", name="check1"), + ) {sql}mytable.create(engine) CREATE TABLE mytable ( @@ -388,12 +402,14 @@ option of being configured directly:: from sqlalchemy import PrimaryKeyConstraint - my_table = Table('mytable', metadata_obj, - Column('id', Integer), - Column('version_id', Integer), - Column('data', String(50)), - PrimaryKeyConstraint('id', 'version_id', name='mytable_pk') - ) + my_table = Table( + "mytable", + metadata_obj, + Column("id", Integer), + Column("version_id", Integer), + Column("data", String(50)), + PrimaryKeyConstraint("id", "version_id", name="mytable_pk"), + ) .. seealso:: @@ -468,11 +484,11 @@ one exception case where an existing name can be further embellished). An example naming convention that suits basic cases is as follows:: convention = { - "ix": 'ix_%(column_0_label)s', - "uq": "uq_%(table_name)s_%(column_0_name)s", - "ck": "ck_%(table_name)s_%(constraint_name)s", - "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s", - "pk": "pk_%(table_name)s" + "ix": "ix_%(column_0_label)s", + "uq": "uq_%(table_name)s_%(column_0_name)s", + "ck": "ck_%(table_name)s_%(constraint_name)s", + "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s", + "pk": "pk_%(table_name)s", } metadata_obj = MetaData(naming_convention=convention) @@ -482,10 +498,12 @@ the target :class:`_schema.MetaData` collection. 
For example, we can observe the name produced when we create an unnamed :class:`.UniqueConstraint`:: - >>> user_table = Table('user', metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String(30), nullable=False), - ... UniqueConstraint('name') + >>> user_table = Table( + ... "user", + ... metadata_obj, + ... Column("id", Integer, primary_key=True), + ... Column("name", String(30), nullable=False), + ... UniqueConstraint("name"), ... ) >>> list(user_table.constraints)[1].name 'uq_user_name' @@ -493,10 +511,12 @@ For example, we can observe the name produced when we create an unnamed This same feature takes effect even if we just use the :paramref:`_schema.Column.unique` flag:: - >>> user_table = Table('user', metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String(30), nullable=False, unique=True) - ... ) + >>> user_table = Table( + ... "user", + ... metadata_obj, + ... Column("id", Integer, primary_key=True), + ... Column("name", String(30), nullable=False, unique=True), + ... ) >>> list(user_table.constraints)[1].name 'uq_user_name' @@ -543,16 +563,17 @@ deterministically truncated using a 4-character suffix based on the md5 hash of the long name. 
For example, the naming convention below will generate very long names given the column names in use:: - metadata_obj = MetaData(naming_convention={ - "uq": "uq_%(table_name)s_%(column_0_N_name)s" - }) + metadata_obj = MetaData( + naming_convention={"uq": "uq_%(table_name)s_%(column_0_N_name)s"} + ) long_names = Table( - 'long_names', metadata_obj, - Column('information_channel_code', Integer, key='a'), - Column('billing_convention_name', Integer, key='b'), - Column('product_identifier', Integer, key='c'), - UniqueConstraint('a', 'b', 'c') + "long_names", + metadata_obj, + Column("information_channel_code", Integer, key="a"), + Column("billing_convention_name", Integer, key="b"), + Column("product_identifier", Integer, key="c"), + UniqueConstraint("a", "b", "c"), ) On the PostgreSQL dialect, names longer than 63 characters will be truncated @@ -580,20 +601,22 @@ that as follows:: import uuid + def fk_guid(constraint, table): - str_tokens = [ - table.name, - ] + [ - element.parent.name for element in constraint.elements - ] + [ - element.target_fullname for element in constraint.elements - ] - guid = uuid.uuid5(uuid.NAMESPACE_OID, "_".join(str_tokens).encode('ascii')) + str_tokens = ( + [ + table.name, + ] + + [element.parent.name for element in constraint.elements] + + [element.target_fullname for element in constraint.elements] + ) + guid = uuid.uuid5(uuid.NAMESPACE_OID, "_".join(str_tokens).encode("ascii")) return str(guid) + convention = { "fk_guid": fk_guid, - "ix": 'ix_%(column_0_label)s', + "ix": "ix_%(column_0_label)s", "fk": "fk_%(fk_guid)s", } @@ -602,18 +625,21 @@ name as follows:: >>> metadata_obj = MetaData(naming_convention=convention) - >>> user_table = Table('user', metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('version', Integer, primary_key=True), - ... Column('data', String(30)) - ... ) - >>> address_table = Table('address', metadata_obj, - ... Column('id', Integer, primary_key=True), - ... 
Column('user_id', Integer), - ... Column('user_version_id', Integer) - ... ) - >>> fk = ForeignKeyConstraint(['user_id', 'user_version_id'], - ... ['user.id', 'user.version']) + >>> user_table = Table( + ... "user", + ... metadata_obj, + ... Column("id", Integer, primary_key=True), + ... Column("version", Integer, primary_key=True), + ... Column("data", String(30)), + ... ) + >>> address_table = Table( + ... "address", + ... metadata_obj, + ... Column("id", Integer, primary_key=True), + ... Column("user_id", Integer), + ... Column("user_version_id", Integer), + ... ) + >>> fk = ForeignKeyConstraint(["user_id", "user_version_id"], ["user.id", "user.version"]) >>> address_table.append_constraint(fk) >>> fk.name fk_0cd51ab5-8d70-56e8-a83c-86661737766d @@ -646,9 +672,11 @@ A typical convention is ``"ck_%(table_name)s_%(constraint_name)s"``:: naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"} ) - Table('foo', metadata_obj, - Column('value', Integer), - CheckConstraint('value > 5', name='value_gt_5') + Table( + "foo", + metadata_obj, + Column("value", Integer), + CheckConstraint("value > 5", name="value_gt_5"), ) The above table will produce the name ``ck_foo_value_gt_5``:: @@ -663,13 +691,9 @@ token; we can make use of this by ensuring we use a :class:`_schema.Column` or :func:`_expression.column` element within the constraint's expression, either by declaring the constraint separate from the table:: - metadata_obj = MetaData( - naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"} - ) + metadata_obj = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}) - foo = Table('foo', metadata_obj, - Column('value', Integer) - ) + foo = Table("foo", metadata_obj, Column("value", Integer)) CheckConstraint(foo.c.value > 5) @@ -677,13 +701,10 @@ or by using a :func:`_expression.column` inline:: from sqlalchemy import column - metadata_obj = MetaData( - naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"} - ) + metadata_obj = 
MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}) - foo = Table('foo', metadata_obj, - Column('value', Integer), - CheckConstraint(column('value') > 5) + foo = Table( + "foo", metadata_obj, Column("value", Integer), CheckConstraint(column("value") > 5) ) Both will produce the name ``ck_foo_value``:: @@ -712,9 +733,7 @@ and :class:`.Enum` which generate a CHECK constraint accompanying the type. The name for the constraint here is most directly set up by sending the "name" parameter, e.g. :paramref:`.Boolean.name`:: - Table('foo', metadata_obj, - Column('flag', Boolean(name='ck_foo_flag')) - ) + Table("foo", metadata_obj, Column("flag", Boolean(name="ck_foo_flag"))) The naming convention feature may be combined with these types as well, normally by using a convention which includes ``%(constraint_name)s`` @@ -724,9 +743,7 @@ and then applying a name to the type:: naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"} ) - Table('foo', metadata_obj, - Column('flag', Boolean(name='flag_bool')) - ) + Table("foo", metadata_obj, Column("flag", Boolean(name="flag_bool"))) The above table will produce the constraint name ``ck_foo_flag_bool``:: @@ -748,13 +765,9 @@ The CHECK constraint may also make use of the ``column_0_name`` token, which works nicely with :class:`.SchemaType` since these constraints have only one column:: - metadata_obj = MetaData( - naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"} - ) + metadata_obj = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(column_0_name)s"}) - Table('foo', metadata_obj, - Column('flag', Boolean()) - ) + Table("foo", metadata_obj, Column("flag", Boolean())) The above schema will produce:: @@ -822,25 +835,24 @@ INDEX" is issued right after the create statements for the table: .. 
sourcecode:: python+sql metadata_obj = MetaData() - mytable = Table('mytable', metadata_obj, + mytable = Table( + "mytable", + metadata_obj, # an indexed column, with index "ix_mytable_col1" - Column('col1', Integer, index=True), - + Column("col1", Integer, index=True), # a uniquely indexed column with index "ix_mytable_col2" - Column('col2', Integer, index=True, unique=True), - - Column('col3', Integer), - Column('col4', Integer), - - Column('col5', Integer), - Column('col6', Integer), - ) + Column("col2", Integer, index=True, unique=True), + Column("col3", Integer), + Column("col4", Integer), + Column("col5", Integer), + Column("col6", Integer), + ) # place an index on col3, col4 - Index('idx_col34', mytable.c.col3, mytable.c.col4) + Index("idx_col34", mytable.c.col3, mytable.c.col4) # place a unique index on col5, col6 - Index('myindex', mytable.c.col5, mytable.c.col6, unique=True) + Index("myindex", mytable.c.col5, mytable.c.col6, unique=True) {sql}mytable.create(engine) CREATE TABLE mytable ( @@ -863,26 +875,24 @@ objects directly. :class:`.Index` also supports identify columns:: metadata_obj = MetaData() - mytable = Table('mytable', metadata_obj, - Column('col1', Integer), - - Column('col2', Integer), - - Column('col3', Integer), - Column('col4', Integer), - + mytable = Table( + "mytable", + metadata_obj, + Column("col1", Integer), + Column("col2", Integer), + Column("col3", Integer), + Column("col4", Integer), # place an index on col1, col2 - Index('idx_col12', 'col1', 'col2'), - + Index("idx_col12", "col1", "col2"), # place a unique index on col3, col4 - Index('idx_col34', 'col3', 'col4', unique=True) + Index("idx_col34", "col3", "col4", unique=True), ) The :class:`~sqlalchemy.schema.Index` object also supports its own ``create()`` method: .. 
sourcecode:: python+sql - i = Index('someindex', mytable.c.col5) + i = Index("someindex", mytable.c.col5) {sql}i.create(engine) CREATE INDEX someindex ON mytable (col5){stop} @@ -897,14 +907,14 @@ value, the :meth:`_expression.ColumnElement.desc` modifier may be used:: from sqlalchemy import Index - Index('someindex', mytable.c.somecol.desc()) + Index("someindex", mytable.c.somecol.desc()) Or with a backend that supports functional indexes such as PostgreSQL, a "case insensitive" index can be created using the ``lower()`` function:: from sqlalchemy import func, Index - Index('someindex', func.lower(mytable.c.somecol)) + Index("someindex", func.lower(mytable.c.somecol)) Index API --------- diff --git a/doc/build/core/custom_types.rst b/doc/build/core/custom_types.rst index 5f9e0555d63..0db63fad942 100644 --- a/doc/build/core/custom_types.rst +++ b/doc/build/core/custom_types.rst @@ -24,6 +24,7 @@ can be associated with any type:: from sqlalchemy.ext.compiler import compiles from sqlalchemy.types import BINARY + @compiles(BINARY, "sqlite") def compile_binary_sqlite(type_, compiler, **kw): return "BLOB" @@ -93,6 +94,7 @@ which coerces as needed:: from sqlalchemy.types import TypeDecorator, Unicode + class CoerceUTF8(TypeDecorator): """Safely coerce Python bytestrings to Unicode before passing off to the database.""" @@ -101,7 +103,7 @@ which coerces as needed:: def process_bind_param(self, value, dialect): if isinstance(value, str): - value = value.decode('utf-8') + value = value.decode("utf-8") return value Rounding Numerics @@ -113,6 +115,7 @@ many decimal places. Here's a recipe that rounds them down:: from sqlalchemy.types import TypeDecorator, Numeric from decimal import Decimal + class SafeNumeric(TypeDecorator): """Adds quantization to Numeric.""" @@ -120,12 +123,11 @@ many decimal places. 
Here's a recipe that rounds them down:: def __init__(self, *arg, **kw): TypeDecorator.__init__(self, *arg, **kw) - self.quantize_int = - self.impl.scale + self.quantize_int = -self.impl.scale self.quantize = Decimal(10) ** self.quantize_int def process_bind_param(self, value, dialect): - if isinstance(value, Decimal) and \ - value.as_tuple()[2] < self.quantize_int: + if isinstance(value, Decimal) and value.as_tuple()[2] < self.quantize_int: value = value.quantize(self.quantize) return value @@ -147,6 +149,7 @@ denormalize:: import datetime + class TZDateTime(TypeDecorator): impl = DateTime cache_ok = True @@ -155,9 +158,7 @@ denormalize:: if value is not None: if not value.tzinfo: raise TypeError("tzinfo is required") - value = value.astimezone(datetime.timezone.utc).replace( - tzinfo=None - ) + value = value.astimezone(datetime.timezone.utc).replace(tzinfo=None) return value def process_result_value(self, value, dialect): @@ -165,7 +166,6 @@ denormalize:: value = value.replace(tzinfo=datetime.timezone.utc) return value - .. _custom_guid_type: Backend-agnostic GUID Type @@ -180,6 +180,7 @@ binary in CHAR(16) if desired:: from sqlalchemy.dialects.postgresql import UUID import uuid + class GUID(TypeDecorator): """Platform-independent GUID type. @@ -187,11 +188,12 @@ binary in CHAR(16) if desired:: CHAR(32), storing as stringified hex values. 
""" + impl = CHAR cache_ok = True def load_dialect_impl(self, dialect): - if dialect.name == 'postgresql': + if dialect.name == "postgresql": return dialect.type_descriptor(UUID()) else: return dialect.type_descriptor(CHAR(32)) @@ -199,7 +201,7 @@ binary in CHAR(16) if desired:: def process_bind_param(self, value, dialect): if value is None: return value - elif dialect.name == 'postgresql': + elif dialect.name == "postgresql": return str(value) else: if not isinstance(value, uuid.UUID): @@ -269,12 +271,12 @@ dictionary-oriented JSON structure, we can apply this as:: json_type = MutableDict.as_mutable(JSONEncodedDict) + class MyClass(Base): # ... json_data = Column(json_type) - .. seealso:: :ref:`mutable_toplevel` @@ -295,8 +297,7 @@ get at this with a type like ``JSONEncodedDict``, we need to from sqlalchemy import type_coerce, String - stmt = select(my_table).where( - type_coerce(my_table.c.json_data, String).like('%foo%')) + stmt = select(my_table).where(type_coerce(my_table.c.json_data, String).like("%foo%")) :class:`.TypeDecorator` provides a built-in system for working up type translations like these based on operators. 
If we wanted to frequently use the @@ -307,6 +308,7 @@ method:: from sqlalchemy.sql import operators from sqlalchemy import String + class JSONEncodedDict(TypeDecorator): impl = VARCHAR @@ -367,6 +369,7 @@ in conjunction with :data:`~.sqlalchemy.sql.expression.func`:: from sqlalchemy import func from sqlalchemy.types import UserDefinedType + class Geometry(UserDefinedType): def get_col_spec(self): return "GEOMETRY" @@ -380,13 +383,18 @@ in conjunction with :data:`~.sqlalchemy.sql.expression.func`:: We can apply the ``Geometry`` type into :class:`_schema.Table` metadata and use it in a :func:`_expression.select` construct:: - geometry = Table('geometry', metadata, - Column('geom_id', Integer, primary_key=True), - Column('geom_data', Geometry) - ) + geometry = Table( + "geometry", + metadata, + Column("geom_id", Integer, primary_key=True), + Column("geom_data", Geometry), + ) - print(select(geometry).where( - geometry.c.geom_data == 'LINESTRING(189412 252431,189631 259122)')) + print( + select(geometry).where( + geometry.c.geom_data == "LINESTRING(189412 252431,189631 259122)" + ) + ) The resulting SQL embeds both functions as appropriate. ``ST_AsText`` is applied to the columns clause so that the return value is run through @@ -403,7 +411,7 @@ with the labeling of the wrapped expression. 
Such as, if we rendered a :func:`_expression.select` against a :func:`.label` of our expression, the string label is moved to the outside of the wrapped expression:: - print(select(geometry.c.geom_data.label('my_data'))) + print(select(geometry.c.geom_data.label("my_data"))) Output:: @@ -415,11 +423,21 @@ Another example is we decorate PostgreSQL ``pgcrypto`` extension to encrypt/decrypt values transparently:: - from sqlalchemy import create_engine, String, select, func, \ - MetaData, Table, Column, type_coerce, TypeDecorator + from sqlalchemy import ( + create_engine, + String, + select, + func, + MetaData, + Table, + Column, + type_coerce, + TypeDecorator, + ) from sqlalchemy.dialects.postgresql import BYTEA + class PGPString(TypeDecorator): impl = BYTEA @@ -440,24 +458,24 @@ transparently:: def column_expression(self, col): return func.pgp_sym_decrypt(col, self.passphrase) + metadata_obj = MetaData() - message = Table('message', metadata_obj, - Column('username', String(50)), - Column('message', - PGPString("this is my passphrase")), - ) + message = Table( + "message", + metadata_obj, + Column("username", String(50)), + Column("message", PGPString("this is my passphrase")), + ) engine = create_engine("postgresql://scott:tiger@localhost/test", echo=True) with engine.begin() as conn: metadata_obj.create_all(conn) - conn.execute(message.insert(), username="some user", - message="this is my message") + conn.execute(message.insert(), username="some user", message="this is my message") - print(conn.scalar( - select(message.c.message).\ - where(message.c.username == "some user") - )) + print( + conn.scalar(select(message.c.message).where(message.c.username == "some user")) + ) The ``pgp_sym_encrypt`` and ``pgp_sym_decrypt`` functions are applied to the INSERT and SELECT statements:: @@ -499,7 +517,7 @@ is given a string representing the SQL operator to render, and the return value is a Python callable that accepts any arbitrary right-hand side expression:: >>> from 
sqlalchemy import column - >>> expr = column('x').op('>>')(column('y')) + >>> expr = column("x").op(">>")(column("y")) >>> print(expr) x >> y @@ -524,6 +542,7 @@ SQL itself:: from sqlalchemy import Integer + class MyInt(Integer): class comparator_factory(Integer.Comparator): def __add__(self, other): @@ -548,6 +567,7 @@ object directly:: from sqlalchemy import Integer + class MyInt(Integer): class comparator_factory(Integer.Comparator): def __add__(self, other): @@ -561,6 +581,7 @@ to integers:: from sqlalchemy import Integer, func + class MyInt(Integer): class comparator_factory(Integer.Comparator): def log(self, other): @@ -589,17 +610,18 @@ along with a :class:`.custom_op` to produce the factorial expression:: from sqlalchemy.sql.expression import UnaryExpression from sqlalchemy.sql import operators + class MyInteger(Integer): class comparator_factory(Integer.Comparator): def factorial(self): - return UnaryExpression(self.expr, - modifier=operators.custom_op("!"), - type_=MyInteger) + return UnaryExpression( + self.expr, modifier=operators.custom_op("!"), type_=MyInteger + ) Using the above type:: >>> from sqlalchemy.sql import column - >>> print(column('x', MyInteger).factorial()) + >>> print(column("x", MyInteger).factorial()) x ! .. seealso:: @@ -651,8 +673,10 @@ datatype. For example:: >>> from sqlalchemy import Table, Column, MetaData, create_engine, PickleType, Integer >>> metadata = MetaData() - >>> my_table = Table("my_table", metadata, Column('id', Integer), Column("data", PickleType)) - >>> engine = create_engine("sqlite://", echo='debug') + >>> my_table = Table( + ... "my_table", metadata, Column("id", Integer), Column("data", PickleType) + ... 
) + >>> engine = create_engine("sqlite://", echo="debug") >>> my_table.create(engine) INFO sqlalchemy.engine.base.Engine CREATE TABLE my_table ( @@ -703,7 +727,9 @@ use reflection in combination with explicit :class:`_schema.Column` objects for columns for which we want to use a custom or decorated datatype:: >>> metadata_three = MetaData() - >>> my_reflected_table = Table("my_table", metadata_three, Column("data", PickleType), autoload_with=engine) + >>> my_reflected_table = Table( + ... "my_table", metadata_three, Column("data", PickleType), autoload_with=engine + ... ) The ``my_reflected_table`` object above is reflected, and will load the definition of the "id" column from the SQLite database. But for the "data" @@ -726,6 +752,7 @@ for example we knew that we wanted all :class:`.BLOB` datatypes to in fact be from sqlalchemy import PickleType from sqlalchemy import Table + @event.listens_for(Table, "column_reflect") def _setup_pickletype(inspector, table, column_info): if isinstance(column_info["type"], BLOB): @@ -741,4 +768,4 @@ In practice, the above event-based approach would likely have additional rules in order to affect only those columns where the datatype is important, such as a lookup table of table names and possibly column names, or other heuristics in order to accurately determine which columns should be established with an -in Python datatype. \ No newline at end of file +in Python datatype. 
diff --git a/doc/build/core/ddl.rst b/doc/build/core/ddl.rst index 9c2fed198db..95665f26b92 100644 --- a/doc/build/core/ddl.rst +++ b/doc/build/core/ddl.rst @@ -32,9 +32,11 @@ other DDL elements except it accepts a string which is the text to be emitted: event.listen( metadata, "after_create", - DDL("ALTER TABLE users ADD CONSTRAINT " + DDL( + "ALTER TABLE users ADD CONSTRAINT " "cst_user_name_length " - " CHECK (length(user_name) >= 8)") + " CHECK (length(user_name) >= 8)" + ), ) A more comprehensive method of creating libraries of DDL constructs is to use @@ -54,9 +56,10 @@ method. For example, if we wanted to create a trigger but only on the PostgreSQL backend, we could invoke this as:: mytable = Table( - 'mytable', metadata, - Column('id', Integer, primary_key=True), - Column('data', String(50)) + "mytable", + metadata, + Column("id", Integer, primary_key=True), + Column("data", String(50)), ) func = DDL( @@ -73,30 +76,18 @@ the PostgreSQL backend, we could invoke this as:: "FOR EACH ROW EXECUTE PROCEDURE my_func();" ) - event.listen( - mytable, - 'after_create', - func.execute_if(dialect='postgresql') - ) + event.listen(mytable, "after_create", func.execute_if(dialect="postgresql")) - event.listen( - mytable, - 'after_create', - trigger.execute_if(dialect='postgresql') - ) + event.listen(mytable, "after_create", trigger.execute_if(dialect="postgresql")) The :paramref:`.DDLElement.execute_if.dialect` keyword also accepts a tuple of string dialect names:: event.listen( - mytable, - "after_create", - trigger.execute_if(dialect=('postgresql', 'mysql')) + mytable, "after_create", trigger.execute_if(dialect=("postgresql", "mysql")) ) event.listen( - mytable, - "before_drop", - trigger.execute_if(dialect=('postgresql', 'mysql')) + mytable, "before_drop", trigger.execute_if(dialect=("postgresql", "mysql")) ) The :meth:`.DDLElement.execute_if` method can also work against a callable @@ -108,27 +99,29 @@ first looking within the PostgreSQL catalogs to see if it exists: 
def should_create(ddl, target, connection, **kw): row = connection.execute( - "select conname from pg_constraint where conname='%s'" % - ddl.element.name).scalar() + "select conname from pg_constraint where conname='%s'" % ddl.element.name + ).scalar() return not bool(row) + def should_drop(ddl, target, connection, **kw): return not should_create(ddl, target, connection, **kw) + event.listen( users, "after_create", DDL( "ALTER TABLE users ADD CONSTRAINT " "cst_user_name_length CHECK (length(user_name) >= 8)" - ).execute_if(callable_=should_create) + ).execute_if(callable_=should_create), ) event.listen( users, "before_drop", - DDL( - "ALTER TABLE users DROP CONSTRAINT cst_user_name_length" - ).execute_if(callable_=should_drop) + DDL("ALTER TABLE users DROP CONSTRAINT cst_user_name_length").execute_if( + callable_=should_drop + ), ) {sql}users.create(engine) @@ -198,22 +191,20 @@ constraints, using these as we did in our previous example of def should_create(ddl, target, connection, **kw): row = connection.execute( - "select conname from pg_constraint where conname='%s'" % - ddl.element.name).scalar() + "select conname from pg_constraint where conname='%s'" % ddl.element.name + ).scalar() return not bool(row) + def should_drop(ddl, target, connection, **kw): return not should_create(ddl, target, connection, **kw) + event.listen( - users, - "after_create", - AddConstraint(constraint).execute_if(callable_=should_create) + users, "after_create", AddConstraint(constraint).execute_if(callable_=should_create) ) event.listen( - users, - "before_drop", - DropConstraint(constraint).execute_if(callable_=should_drop) + users, "before_drop", DropConstraint(constraint).execute_if(callable_=should_drop) ) {sql}users.create(engine) diff --git a/doc/build/core/defaults.rst b/doc/build/core/defaults.rst index bccc8375c1d..60e7e2bc57d 100644 --- a/doc/build/core/defaults.rst +++ b/doc/build/core/defaults.rst @@ -59,9 +59,7 @@ Scalar Defaults The simplest kind of default is a scalar 
value used as the default value of a column:: - Table("mytable", metadata_obj, - Column("somecolumn", Integer, default=12) - ) + Table("mytable", metadata_obj, Column("somecolumn", Integer, default=12)) Above, the value "12" will be bound as the column value during an INSERT if no other value is supplied. @@ -70,10 +68,7 @@ A scalar value may also be associated with an UPDATE statement, though this is not very common (as UPDATE statements are usually looking for dynamic defaults):: - Table("mytable", metadata_obj, - Column("somecolumn", Integer, onupdate=25) - ) - + Table("mytable", metadata_obj, Column("somecolumn", Integer, onupdate=25)) Python-Executed Functions ------------------------- @@ -86,13 +81,18 @@ incrementing counter to a primary key column:: # a function which counts upwards i = 0 + + def mydefault(): global i i += 1 return i - t = Table("mytable", metadata_obj, - Column('id', Integer, primary_key=True, default=mydefault), + + t = Table( + "mytable", + metadata_obj, + Column("id", Integer, primary_key=True, default=mydefault), ) It should be noted that for real "incrementing sequence" behavior, the @@ -109,11 +109,12 @@ the :paramref:`_schema.Column.onupdate` attribute:: import datetime - t = Table("mytable", metadata_obj, - Column('id', Integer, primary_key=True), - + t = Table( + "mytable", + metadata_obj, + Column("id", Integer, primary_key=True), # define 'last_updated' to be populated with datetime.now() - Column('last_updated', DateTime, onupdate=datetime.datetime.now), + Column("last_updated", DateTime, onupdate=datetime.datetime.now), ) When an update statement executes and no value is passed for ``last_updated``, @@ -139,11 +140,14 @@ updated on the row. 
To access the context, provide a function that accepts a single ``context`` argument:: def mydefault(context): - return context.get_current_parameters()['counter'] + 12 + return context.get_current_parameters()["counter"] + 12 - t = Table('mytable', metadata_obj, - Column('counter', Integer), - Column('counter_plus_twelve', Integer, default=mydefault, onupdate=mydefault) + + t = Table( + "mytable", + metadata_obj, + Column("counter", Integer), + Column("counter_plus_twelve", Integer, default=mydefault, onupdate=mydefault), ) The above default generation function is applied so that it will execute for @@ -184,18 +188,21 @@ The :paramref:`_schema.Column.default` and :paramref:`_schema.Column.onupdate` k also be passed SQL expressions, which are in most cases rendered inline within the INSERT or UPDATE statement:: - t = Table("mytable", metadata_obj, - Column('id', Integer, primary_key=True), - + t = Table( + "mytable", + metadata_obj, + Column("id", Integer, primary_key=True), # define 'create_date' to default to now() - Column('create_date', DateTime, default=func.now()), - + Column("create_date", DateTime, default=func.now()), # define 'key' to pull its default from the 'keyvalues' table - Column('key', String(20), default=select(keyvalues.c.key).where(keyvalues.c.type='type1')), - + Column( + "key", + String(20), + default=select(keyvalues.c.key).where(keyvalues.c.type="type1"), + ), # define 'last_modified' to use the current_timestamp SQL function on update - Column('last_modified', DateTime, onupdate=func.utc_timestamp()) - ) + Column("last_modified", DateTime, onupdate=func.utc_timestamp()), + ) Above, the ``create_date`` column will be populated with the result of the ``now()`` SQL function (which, depending on backend, compiles into ``NOW()`` @@ -257,10 +264,12 @@ placed in the CREATE TABLE statement during a :meth:`_schema.Table.create` opera .. 
sourcecode:: python+sql - t = Table('test', metadata_obj, - Column('abc', String(20), server_default='abc'), - Column('created_at', DateTime, server_default=func.sysdate()), - Column('index_value', Integer, server_default=text("0")) + t = Table( + "test", + metadata_obj, + Column("abc", String(20), server_default="abc"), + Column("created_at", DateTime, server_default=func.sysdate()), + Column("index_value", Integer, server_default=text("0")), ) A create call for the above table will produce:: @@ -296,10 +305,12 @@ may be called out using :class:`.FetchedValue` as a marker:: from sqlalchemy.schema import FetchedValue - t = Table('test', metadata_obj, - Column('id', Integer, primary_key=True), - Column('abc', TIMESTAMP, server_default=FetchedValue()), - Column('def', String(20), server_onupdate=FetchedValue()) + t = Table( + "test", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("abc", TIMESTAMP, server_default=FetchedValue()), + Column("def", String(20), server_onupdate=FetchedValue()), ) The :class:`.FetchedValue` indicator does not affect the rendered DDL for the @@ -344,13 +355,17 @@ The :class:`~sqlalchemy.schema.Sequence` may be placed on any column as a configured to fire off during UPDATE operations if desired. 
It is most commonly used in conjunction with a single integer primary key column:: - table = Table("cartitems", metadata_obj, + table = Table( + "cartitems", + metadata_obj, Column( "cart_id", Integer, - Sequence('cart_id_seq', metadata=metadata_obj), primary_key=True), + Sequence("cart_id_seq", metadata=metadata_obj), + primary_key=True, + ), Column("description", String(40)), - Column("createdate", DateTime()) + Column("createdate", DateTime()), ) Where above, the table "cartitems" is associated with a sequence named @@ -397,7 +412,7 @@ object, it can be invoked with its "next value" instruction by passing it directly to a SQL execution method:: with my_engine.connect() as conn: - seq = Sequence('some_sequence') + seq = Sequence("some_sequence") nextid = conn.execute(seq) In order to embed the "next value" function of a :class:`.Sequence` @@ -405,7 +420,7 @@ inside of a SQL statement like a SELECT or INSERT, use the :meth:`.Sequence.next method, which will render at statement compilation time a SQL function that is appropriate for the target backend:: - >>> my_seq = Sequence('some_sequence') + >>> my_seq = Sequence("some_sequence") >>> stmt = select(my_seq.next_value()) >>> print(stmt.compile(dialect=postgresql.dialect())) SELECT nextval('some_sequence') AS next_value_1 @@ -418,24 +433,29 @@ Associating a Sequence with the MetaData For many years, the SQLAlchemy documentation referred to the example of associating a :class:`.Sequence` with a table as follows:: - table = Table("cartitems", metadata_obj, - Column("cart_id", Integer, Sequence('cart_id_seq'), - primary_key=True), + table = Table( + "cartitems", + metadata_obj, + Column("cart_id", Integer, Sequence("cart_id_seq"), primary_key=True), Column("description", String(40)), - Column("createdate", DateTime()) + Column("createdate", DateTime()), ) While the above is a prominent idiomatic pattern, it is recommended that the :class:`.Sequence` in most cases be explicitly associated with the 
:class:`_schema.MetaData`, using the :paramref:`.Sequence.metadata` parameter:: - table = Table("cartitems", metadata_obj, + table = Table( + "cartitems", + metadata_obj, Column( "cart_id", Integer, - Sequence('cart_id_seq', metadata=metadata_obj), primary_key=True), + Sequence("cart_id_seq", metadata=metadata_obj), + primary_key=True, + ), Column("description", String(40)), - Column("createdate", DateTime()) + Column("createdate", DateTime()), ) The :class:`.Sequence` object is a first class @@ -480,8 +500,8 @@ The preceding sections illustrate how to associate a :class:`.Sequence` with a :class:`_schema.Column` as the **Python side default generator**:: Column( - "cart_id", Integer, Sequence('cart_id_seq', metadata=metadata_obj), - primary_key=True) + "cart_id", Integer, Sequence("cart_id_seq", metadata=metadata_obj), primary_key=True + ) In the above case, the :class:`.Sequence` will automatically be subject to CREATE SEQUENCE / DROP SEQUENCE DDL when the related :class:`_schema.Table` @@ -497,24 +517,30 @@ we illustrate the same :class:`.Sequence` being associated with the :class:`_schema.Column` both as the Python-side default generator as well as the server-side default generator:: - cart_id_seq = Sequence('cart_id_seq', metadata=metadata_obj) - table = Table("cartitems", metadata_obj, + cart_id_seq = Sequence("cart_id_seq", metadata=metadata_obj) + table = Table( + "cartitems", + metadata_obj, Column( - "cart_id", Integer, cart_id_seq, - server_default=cart_id_seq.next_value(), primary_key=True), + "cart_id", + Integer, + cart_id_seq, + server_default=cart_id_seq.next_value(), + primary_key=True, + ), Column("description", String(40)), - Column("createdate", DateTime()) + Column("createdate", DateTime()), ) or with the ORM:: class CartItem(Base): - __tablename__ = 'cartitems' + __tablename__ = "cartitems" - cart_id_seq = Sequence('cart_id_seq', metadata=Base.metadata) + cart_id_seq = Sequence("cart_id_seq", metadata=Base.metadata) cart_id = Column( - 
Integer, cart_id_seq, - server_default=cart_id_seq.next_value(), primary_key=True) + Integer, cart_id_seq, server_default=cart_id_seq.next_value(), primary_key=True + ) description = Column(String(40)) createdate = Column(DateTime) @@ -665,8 +691,8 @@ Example:: data = Table( "data", metadata_obj, - Column('id', Integer, Identity(start=42, cycle=True), primary_key=True), - Column('data', String) + Column("id", Integer, Identity(start=42, cycle=True), primary_key=True), + Column("data", String), ) The DDL for the ``data`` table when run on a PostgreSQL 12 backend will look diff --git a/doc/build/core/engines.rst b/doc/build/core/engines.rst index ffbfc108888..f27caa2d4f7 100644 --- a/doc/build/core/engines.rst +++ b/doc/build/core/engines.rst @@ -22,7 +22,8 @@ Creating an engine is just a matter of issuing a single call, :func:`_sa.create_engine()`:: from sqlalchemy import create_engine - engine = create_engine('postgresql://scott:tiger@localhost:5432/mydatabase') + + engine = create_engine("postgresql://scott:tiger@localhost:5432/mydatabase") The above engine creates a :class:`.Dialect` object tailored towards PostgreSQL, as well as a :class:`_pool.Pool` object which will establish a DBAPI @@ -118,13 +119,13 @@ The PostgreSQL dialect uses psycopg2 as the default DBAPI. Other PostgreSQL DBAPIs include pg8000 and asyncpg:: # default - engine = create_engine('postgresql://scott:tiger@localhost/mydatabase') + engine = create_engine("postgresql://scott:tiger@localhost/mydatabase") # psycopg2 - engine = create_engine('postgresql+psycopg2://scott:tiger@localhost/mydatabase') + engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/mydatabase") # pg8000 - engine = create_engine('postgresql+pg8000://scott:tiger@localhost/mydatabase') + engine = create_engine("postgresql+pg8000://scott:tiger@localhost/mydatabase") More notes on connecting to PostgreSQL at :ref:`postgresql_toplevel`. @@ -135,13 +136,13 @@ The MySQL dialect uses mysqlclient as the default DBAPI. 
There are other MySQL DBAPIs available, including PyMySQL:: # default - engine = create_engine('mysql://scott:tiger@localhost/foo') + engine = create_engine("mysql://scott:tiger@localhost/foo") # mysqlclient (a maintained fork of MySQL-Python) - engine = create_engine('mysql+mysqldb://scott:tiger@localhost/foo') + engine = create_engine("mysql+mysqldb://scott:tiger@localhost/foo") # PyMySQL - engine = create_engine('mysql+pymysql://scott:tiger@localhost/foo') + engine = create_engine("mysql+pymysql://scott:tiger@localhost/foo") More notes on connecting to MySQL at :ref:`mysql_toplevel`. @@ -150,9 +151,9 @@ Oracle The Oracle dialect uses cx_oracle as the default DBAPI:: - engine = create_engine('oracle://scott:tiger@127.0.0.1:1521/sidname') + engine = create_engine("oracle://scott:tiger@127.0.0.1:1521/sidname") - engine = create_engine('oracle+cx_oracle://scott:tiger@tnsname') + engine = create_engine("oracle+cx_oracle://scott:tiger@tnsname") More notes on connecting to Oracle at :ref:`oracle_toplevel`. @@ -163,10 +164,10 @@ The SQL Server dialect uses pyodbc as the default DBAPI. pymssql is also available:: # pyodbc - engine = create_engine('mssql+pyodbc://scott:tiger@mydsn') + engine = create_engine("mssql+pyodbc://scott:tiger@mydsn") # pymssql - engine = create_engine('mssql+pymssql://scott:tiger@hostname:port/dbname') + engine = create_engine("mssql+pymssql://scott:tiger@hostname:port/dbname") More notes on connecting to SQL Server at :ref:`mssql_toplevel`. 
@@ -182,22 +183,22 @@ For a relative file path, this requires three slashes:: # sqlite:/// # where is relative: - engine = create_engine('sqlite:///foo.db') + engine = create_engine("sqlite:///foo.db") And for an absolute file path, the three slashes are followed by the absolute path:: # Unix/Mac - 4 initial slashes in total - engine = create_engine('sqlite:////absolute/path/to/foo.db') + engine = create_engine("sqlite:////absolute/path/to/foo.db") # Windows - engine = create_engine('sqlite:///C:\\path\\to\\foo.db') + engine = create_engine("sqlite:///C:\\path\\to\\foo.db") # Windows alternative using raw string - engine = create_engine(r'sqlite:///C:\path\to\foo.db') + engine = create_engine(r"sqlite:///C:\path\to\foo.db") To use a SQLite ``:memory:`` database, specify an empty URL:: - engine = create_engine('sqlite://') + engine = create_engine("sqlite://") More notes on connecting to SQLite at :ref:`sqlite_toplevel`. @@ -263,7 +264,9 @@ Engine Creation API for keys and either strings or tuples of strings for values, e.g.:: >>> from sqlalchemy.engine import make_url - >>> url = make_url("postgresql://user:pass@host/dbname?alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt") + >>> url = make_url( + ... "postgresql://user:pass@host/dbname?alt_host=host1&alt_host=host2&ssl_cipher=%2Fpath%2Fto%2Fcrt" + ... ) >>> url.query immutabledict({'alt_host': ('host1', 'host2'), 'ssl_cipher': '/path/to/crt'}) @@ -335,9 +338,7 @@ often specified in the query string of the URL directly. A common example of this is DBAPIs that accept an argument ``encoding`` for character encodings, such as most MySQL DBAPIs:: - engine = create_engine( - "mysql+pymysql://user:pass@host/test?charset=utf8mb4" - ) + engine = create_engine("mysql+pymysql://user:pass@host/test?charset=utf8mb4") The advantage of using the query string is that additional DBAPI options may be specified in configuration files in a manner that's portable to the DBAPI @@ -356,7 +357,9 @@ supported at this level. 
method directly as follows:: >>> from sqlalchemy import create_engine - >>> engine = create_engine("mysql+pymysql://some_user:some_pass@some_host/test?charset=utf8mb4") + >>> engine = create_engine( + ... "mysql+pymysql://some_user:some_pass@some_host/test?charset=utf8mb4" + ... ) >>> args, kwargs = engine.dialect.create_connect_args(engine.url) >>> args, kwargs ([], {'host': 'some_host', 'database': 'test', 'user': 'some_user', 'password': 'some_pass', 'charset': 'utf8mb4', 'client_flag': 2}) @@ -381,14 +384,14 @@ underlying implementation the connection:: engine = create_engine( "postgresql://user:pass@hostname/dbname", - connect_args={"connection_factory": MyConnectionFactory} + connect_args={"connection_factory": MyConnectionFactory}, ) Another example is the pyodbc "timeout" parameter:: engine = create_engine( - "mssql+pyodbc://user:pass@sqlsrvr?driver=ODBC+Driver+13+for+SQL+Server", - connect_args={"timeout": 30} + "mssql+pyodbc://user:pass@sqlsrvr?driver=ODBC+Driver+13+for+SQL+Server", + connect_args={"timeout": 30}, ) The above example also illustrates that both URL "query string" parameters as @@ -409,9 +412,10 @@ collections can then be modified in place to alter how they are used:: engine = create_engine("postgresql://user:pass@hostname/dbname") + @event.listens_for(engine, "do_connect") def receive_do_connect(dialect, conn_rec, cargs, cparams): - cparams['connection_factory'] = MyConnectionFactory + cparams["connection_factory"] = MyConnectionFactory .. _engines_dynamic_tokens: @@ -428,9 +432,10 @@ parameter, this could be implemented as:: engine = create_engine("postgresql://user@hostname/dbname") + @event.listens_for(engine, "do_connect") def provide_token(dialect, conn_rec, cargs, cparams): - cparams['token'] = get_authentication_token() + cparams["token"] = get_authentication_token() .. 
seealso:: @@ -449,9 +454,8 @@ SQLAlchemy:: from sqlalchemy import event - engine = create_engine( - "postgresql://user:pass@hostname/dbname" - ) + engine = create_engine("postgresql://user:pass@hostname/dbname") + @event.listens_for(engine, "connect") def connect(dbapi_connection, connection_record): @@ -459,7 +463,6 @@ SQLAlchemy:: cursor_obj.execute("SET some session variables") cursor_obj.close() - Fully Replacing the DBAPI ``connect()`` function ------------------------------------------------ @@ -469,9 +472,8 @@ and returning it:: from sqlalchemy import event - engine = create_engine( - "postgresql://user:pass@hostname/dbname" - ) + engine = create_engine("postgresql://user:pass@hostname/dbname") + @event.listens_for(engine, "do_connect") def receive_do_connect(dialect, conn_rec, cargs, cparams): @@ -531,7 +533,7 @@ For example, to log SQL queries using Python logging instead of the import logging logging.basicConfig() - logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO) + logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO) By default, the log level is set to ``logging.WARN`` within the entire ``sqlalchemy`` namespace so that no log operations occur, even within an @@ -559,10 +561,9 @@ parameters are a shortcut to immediate logging to ``sys.stdout``:: >>> from sqlalchemy import create_engine, text - >>> e = create_engine("sqlite://", echo=True, echo_pool='debug') + >>> e = create_engine("sqlite://", echo=True, echo_pool="debug") >>> with e.connect() as conn: - ... print(conn.scalar(text("select 'hi'"))) - ... + ... 
print(conn.scalar(text("select 'hi'"))) 2020-10-24 12:54:57,701 DEBUG sqlalchemy.pool.impl.SingletonThreadPool Created new connection 2020-10-24 12:54:57,701 DEBUG sqlalchemy.pool.impl.SingletonThreadPool Connection checked out from pool 2020-10-24 12:54:57,702 INFO sqlalchemy.engine.Engine select 'hi' @@ -574,6 +575,7 @@ parameters are a shortcut to immediate logging to ``sys.stdout``:: Use of these flags is roughly equivalent to:: import logging + logging.basicConfig() logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO) logging.getLogger("sqlalchemy.pool").setLevel(logging.DEBUG) @@ -597,10 +599,9 @@ string. To set this to a specific name, use the >>> from sqlalchemy import create_engine >>> from sqlalchemy import text - >>> e = create_engine("sqlite://", echo=True, logging_name='myengine') + >>> e = create_engine("sqlite://", echo=True, logging_name="myengine") >>> with e.connect() as conn: ... conn.execute(text("select 'hi'")) - ... 2020-10-24 12:47:04,291 INFO sqlalchemy.engine.Engine.myengine select 'hi' 2020-10-24 12:47:04,292 INFO sqlalchemy.engine.Engine.myengine () @@ -669,7 +670,6 @@ these parameters from being logged for privacy purposes, enable the >>> e = create_engine("sqlite://", echo=True, hide_parameters=True) >>> with e.connect() as conn: ... conn.execute(text("select :some_private_name"), {"some_private_name": "pii"}) - ... 2020-10-24 12:48:32,808 INFO sqlalchemy.engine.Engine select ? 
2020-10-24 12:48:32,808 INFO sqlalchemy.engine.Engine [SQL parameters hidden due to hide_parameters=True] diff --git a/doc/build/core/event.rst b/doc/build/core/event.rst index af4e33ba9a5..fbdc72183e5 100644 --- a/doc/build/core/event.rst +++ b/doc/build/core/event.rst @@ -25,16 +25,19 @@ and that a user-defined listener function should receive two positional argument from sqlalchemy.event import listen from sqlalchemy.pool import Pool + def my_on_connect(dbapi_con, connection_record): print("New DBAPI connection:", dbapi_con) - listen(Pool, 'connect', my_on_connect) + + listen(Pool, "connect", my_on_connect) To listen with the :func:`.listens_for` decorator looks like:: from sqlalchemy.event import listens_for from sqlalchemy.pool import Pool + @listens_for(Pool, "connect") def my_on_connect(dbapi_con, connection_record): print("New DBAPI connection:", dbapi_con) @@ -54,9 +57,10 @@ that accepts ``**keyword`` arguments, by passing ``named=True`` to either from sqlalchemy.event import listens_for from sqlalchemy.pool import Pool + @listens_for(Pool, "connect", named=True) def my_on_connect(**kw): - print("New DBAPI connection:", kw['dbapi_connection']) + print("New DBAPI connection:", kw["dbapi_connection"]) When using named argument passing, the names listed in the function argument specification will be used as keys in the dictionary. @@ -68,10 +72,11 @@ as long as the names match up:: from sqlalchemy.event import listens_for from sqlalchemy.pool import Pool + @listens_for(Pool, "connect", named=True) def my_on_connect(dbapi_connection, **kw): print("New DBAPI connection:", dbapi_connection) - print("Connection record:", kw['connection_record']) + print("Connection record:", kw["connection_record"]) Above, the presence of ``**kw`` tells :func:`.listens_for` that arguments should be passed to the function by name, rather than positionally. 
@@ -95,25 +100,26 @@ and objects:: from sqlalchemy.engine import Engine import psycopg2 + def connect(): - return psycopg2.connect(user='ed', host='127.0.0.1', dbname='test') + return psycopg2.connect(user="ed", host="127.0.0.1", dbname="test") + my_pool = QueuePool(connect) - my_engine = create_engine('postgresql://ed@localhost/test') + my_engine = create_engine("postgresql://ed@localhost/test") # associate listener with all instances of Pool - listen(Pool, 'connect', my_on_connect) + listen(Pool, "connect", my_on_connect) # associate listener with all instances of Pool # via the Engine class - listen(Engine, 'connect', my_on_connect) + listen(Engine, "connect", my_on_connect) # associate listener with my_pool - listen(my_pool, 'connect', my_on_connect) + listen(my_pool, "connect", my_on_connect) # associate listener with my_engine.pool - listen(my_engine, 'connect', my_on_connect) - + listen(my_engine, "connect", my_on_connect) .. _event_modifiers: @@ -130,11 +136,12 @@ this value can be supported:: def validate_phone(target, value, oldvalue, initiator): """Strip non-numeric characters from a phone number""" - return re.sub(r'\D', '', value) + return re.sub(r"\D", "", value) + # setup listener on UserContact.phone attribute, instructing # it to use the return value - listen(UserContact.phone, 'set', validate_phone, retval=True) + listen(UserContact.phone, "set", validate_phone, retval=True) Event Reference --------------- diff --git a/doc/build/core/functions.rst b/doc/build/core/functions.rst index efa7c78d33f..6fcee6edaa2 100644 --- a/doc/build/core/functions.rst +++ b/doc/build/core/functions.rst @@ -44,7 +44,7 @@ common SQL functions that set up the expected return type for each function automatically. 
The are invoked in the same way as any other member of the :data:`_sql.func` namespace:: - select(func.count('*')).select_from(some_table) + select(func.count("*")).select_from(some_table) Note that any name not known to :data:`_sql.func` generates the function name as is - there is no restriction on what SQL functions can be called, known or diff --git a/doc/build/core/future.rst b/doc/build/core/future.rst index 204e401350d..6323e732a3d 100644 --- a/doc/build/core/future.rst +++ b/doc/build/core/future.rst @@ -15,6 +15,7 @@ by passing the :paramref:`_sa.create_engine.future` flag to :func:`_sa.create_engine`:: from sqlalchemy import create_engine + engine = create_engine("postgresql://user:pass@host/dbname", future=True) Similarly, with the ORM, to enable "future" behavior in the ORM :class:`.Session`, diff --git a/doc/build/core/metadata.rst b/doc/build/core/metadata.rst index 03721c2b6c3..154472af5dc 100644 --- a/doc/build/core/metadata.rst +++ b/doc/build/core/metadata.rst @@ -37,12 +37,12 @@ The remaining positional arguments are mostly from sqlalchemy import Table, Column, Integer, String user = Table( - 'user', + "user", metadata_obj, - Column('user_id', Integer, primary_key=True), - Column('user_name', String(16), nullable=False), - Column('email_address', String(60)), - Column('nickname', String(50), nullable=False) + Column("user_id", Integer, primary_key=True), + Column("user_name", String(16), nullable=False), + Column("email_address", String(60)), + Column("nickname", String(50), nullable=False), ) Above, a table called ``user`` is described, which contains four columns. The @@ -69,7 +69,7 @@ dependency (that is, each table is preceded by all tables which it references):: >>> for t in metadata_obj.sorted_tables: - ... print(t.name) + ... print(t.name) user user_preference invoice @@ -82,10 +82,12 @@ module-level variables in an application. Once a accessors which allow inspection of its properties. 
Given the following :class:`~sqlalchemy.schema.Table` definition:: - employees = Table('employees', metadata_obj, - Column('employee_id', Integer, primary_key=True), - Column('employee_name', String(60), nullable=False), - Column('employee_dept', Integer, ForeignKey("departments.department_id")) + employees = Table( + "employees", + metadata_obj, + Column("employee_id", Integer, primary_key=True), + Column("employee_name", String(60), nullable=False), + Column("employee_dept", Integer, ForeignKey("departments.department_id")), ) Note the :class:`~sqlalchemy.schema.ForeignKey` object used in this table - @@ -100,7 +102,7 @@ table include:: employees.c.employee_id # via string - employees.c['employee_id'] + employees.c["employee_id"] # iterate through all columns for c in employees.c: @@ -171,22 +173,26 @@ will issue the CREATE statements: .. sourcecode:: python+sql - engine = create_engine('sqlite:///:memory:') + engine = create_engine("sqlite:///:memory:") metadata_obj = MetaData() - user = Table('user', metadata_obj, - Column('user_id', Integer, primary_key=True), - Column('user_name', String(16), nullable=False), - Column('email_address', String(60), key='email'), - Column('nickname', String(50), nullable=False) + user = Table( + "user", + metadata_obj, + Column("user_id", Integer, primary_key=True), + Column("user_name", String(16), nullable=False), + Column("email_address", String(60), key="email"), + Column("nickname", String(50), nullable=False), ) - user_prefs = Table('user_prefs', metadata_obj, - Column('pref_id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False), - Column('pref_name', String(40), nullable=False), - Column('pref_value', String(100)) + user_prefs = Table( + "user_prefs", + metadata_obj, + Column("pref_id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.user_id"), nullable=False), + Column("pref_name", String(40), nullable=False), + Column("pref_value", 
String(100)), ) {sql}metadata_obj.create_all(engine) @@ -222,14 +228,16 @@ default issue the CREATE or DROP regardless of the table being present: .. sourcecode:: python+sql - engine = create_engine('sqlite:///:memory:') + engine = create_engine("sqlite:///:memory:") metadata_obj = MetaData() - employees = Table('employees', metadata_obj, - Column('employee_id', Integer, primary_key=True), - Column('employee_name', String(60), nullable=False, key='name'), - Column('employee_dept', Integer, ForeignKey("departments.department_id")) + employees = Table( + "employees", + metadata_obj, + Column("employee_id", Integer, primary_key=True), + Column("employee_name", String(60), nullable=False, key="name"), + Column("employee_dept", Integer, ForeignKey("departments.department_id")), ) {sql}employees.create(engine) CREATE TABLE employees( @@ -340,11 +348,11 @@ using a Core :class:`_schema.Table` object as follows:: metadata_obj = MetaData() financial_info = Table( - 'financial_info', + "financial_info", metadata_obj, - Column('id', Integer, primary_key=True), - Column('value', String(100), nullable=False), - schema='remote_banks' + Column("id", Integer, primary_key=True), + Column("value", String(100), nullable=False), + schema="remote_banks", ) SQL that is rendered using this :class:`_schema.Table`, such as the SELECT @@ -361,7 +369,7 @@ using the combination of the schema and table name. 
We can view this in the :attr:`_schema.MetaData.tables` collection by searching for the key ``'remote_banks.financial_info'``:: - >>> metadata_obj.tables['remote_banks.financial_info'] + >>> metadata_obj.tables["remote_banks.financial_info"] Table('financial_info', MetaData(), Column('id', Integer(), table=, primary_key=True, nullable=False), Column('value', String(length=100), table=, nullable=False), @@ -374,9 +382,9 @@ objects, even if the referring table is also in that same schema:: customer = Table( "customer", metadata_obj, - Column('id', Integer, primary_key=True), - Column('financial_info_id', ForeignKey("remote_banks.financial_info.id")), - schema='remote_banks' + Column("id", Integer, primary_key=True), + Column("financial_info_id", ForeignKey("remote_banks.financial_info.id")), + schema="remote_banks", ) The :paramref:`_schema.Table.schema` argument may also be used with certain @@ -386,7 +394,7 @@ important on a database such as Microsoft SQL Server where there are often dotted "database/owner" tokens. The tokens may be placed directly in the name at once, such as:: - schema="dbo.scott" + schema = "dbo.scott" .. seealso:: @@ -409,10 +417,10 @@ construct:: metadata_obj = MetaData(schema="remote_banks") financial_info = Table( - 'financial_info', + "financial_info", metadata_obj, - Column('id', Integer, primary_key=True), - Column('value', String(100), nullable=False), + Column("id", Integer, primary_key=True), + Column("value", String(100), nullable=False), ) Above, for any :class:`_schema.Table` object (or :class:`_schema.Sequence` object @@ -422,7 +430,7 @@ act as though the parameter were set to the value ``"remote_banks"``. 
This includes that the :class:`_schema.Table` is cataloged in the :class:`_schema.MetaData` using the schema-qualified name, that is:: - metadata_obj.tables['remote_banks.financial_info'] + metadata_obj.tables["remote_banks.financial_info"] When using the :class:`_schema.ForeignKey` or :class:`_schema.ForeignKeyConstraint` objects to refer to this table, either the schema-qualified name or the @@ -432,20 +440,20 @@ table:: # either will work: refers_to_financial_info = Table( - 'refers_to_financial_info', + "refers_to_financial_info", metadata_obj, - Column('id', Integer, primary_key=True), - Column('fiid', ForeignKey('financial_info.id')), + Column("id", Integer, primary_key=True), + Column("fiid", ForeignKey("financial_info.id")), ) # or refers_to_financial_info = Table( - 'refers_to_financial_info', + "refers_to_financial_info", metadata_obj, - Column('id', Integer, primary_key=True), - Column('fiid', ForeignKey('remote_banks.financial_info.id')), + Column("id", Integer, primary_key=True), + Column("fiid", ForeignKey("remote_banks.financial_info.id")), ) When using a :class:`_schema.MetaData` object that sets @@ -458,11 +466,11 @@ to specify that it should not be schema qualified may use the special symbol metadata_obj = MetaData(schema="remote_banks") financial_info = Table( - 'financial_info', + "financial_info", metadata_obj, - Column('id', Integer, primary_key=True), - Column('value', String(100), nullable=False), - schema=BLANK_SCHEMA # will not use "remote_banks" + Column("id", Integer, primary_key=True), + Column("value", String(100), nullable=False), + schema=BLANK_SCHEMA, # will not use "remote_banks" ) .. 
seealso:: @@ -511,6 +519,7 @@ Oracle CURRENT_SCHEMA variable to an alternate name:: engine = create_engine("oracle+cx_oracle://scott:tiger@tsn_name") + @event.listens_for(engine, "connect", insert=True) def set_current_schema(dbapi_connection, connection_record): cursor_obj = dbapi_connection.cursor() @@ -552,11 +561,13 @@ example, MySQL has different table backend types, including "MyISAM" and "InnoDB". This can be expressed with :class:`~sqlalchemy.schema.Table` using ``mysql_engine``:: - addresses = Table('engine_email_addresses', metadata_obj, - Column('address_id', Integer, primary_key=True), - Column('remote_user_id', Integer, ForeignKey(users.c.user_id)), - Column('email_address', String(20)), - mysql_engine='InnoDB' + addresses = Table( + "engine_email_addresses", + metadata_obj, + Column("address_id", Integer, primary_key=True), + Column("remote_user_id", Integer, ForeignKey(users.c.user_id)), + Column("email_address", String(20)), + mysql_engine="InnoDB", ) Other backends may support table-level options as well - these would be diff --git a/doc/build/core/operators.rst b/doc/build/core/operators.rst index d119db1e0cc..10b6db33805 100644 --- a/doc/build/core/operators.rst +++ b/doc/build/core/operators.rst @@ -11,17 +11,17 @@ Operator Reference >>> user_table = Table( ... "user_account", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String(30)), - ... Column('fullname', String) + ... Column("id", Integer, primary_key=True), + ... Column("name", String(30)), + ... Column("fullname", String), ... ) >>> from sqlalchemy import ForeignKey >>> address_table = Table( ... "address", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('user_id', None, ForeignKey('user_account.id')), - ... Column('email_address', String, nullable=False) + ... Column("id", Integer, primary_key=True), + ... Column("user_id", None, ForeignKey("user_account.id")), + ... Column("email_address", String, nullable=False), ... 
) >>> metadata_obj.create_all(engine) BEGIN (implicit) @@ -30,7 +30,7 @@ Operator Reference >>> Base = declarative_base() >>> from sqlalchemy.orm import relationship >>> class User(Base): - ... __tablename__ = 'user_account' + ... __tablename__ = "user_account" ... ... id = Column(Integer, primary_key=True) ... name = Column(String(30)) @@ -39,14 +39,14 @@ Operator Reference ... addresses = relationship("Address", back_populates="user") ... ... def __repr__(self): - ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" + ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" >>> class Address(Base): - ... __tablename__ = 'address' + ... __tablename__ = "address" ... ... id = Column(Integer, primary_key=True) ... email_address = Column(String, nullable=False) - ... user_id = Column(Integer, ForeignKey('user_account.id')) + ... user_id = Column(Integer, ForeignKey("user_account.id")) ... ... user = relationship("User", back_populates="addresses") ... @@ -55,22 +55,34 @@ Operator Reference >>> conn = engine.connect() >>> from sqlalchemy.orm import Session >>> session = Session(conn) - >>> session.add_all([ - ... User(name="spongebob", fullname="Spongebob Squarepants", addresses=[ - ... Address(email_address="spongebob@sqlalchemy.org") - ... ]), - ... User(name="sandy", fullname="Sandy Cheeks", addresses=[ - ... Address(email_address="sandy@sqlalchemy.org"), - ... Address(email_address="squirrel@squirrelpower.org") - ... ]), - ... User(name="patrick", fullname="Patrick Star", addresses=[ - ... Address(email_address="pat999@aol.com") - ... ]), - ... User(name="squidward", fullname="Squidward Tentacles", addresses=[ - ... Address(email_address="stentcl@sqlalchemy.org") - ... ]), - ... User(name="ehkrabs", fullname="Eugene H. Krabs"), - ... ]) + >>> session.add_all( + ... [ + ... User( + ... name="spongebob", + ... fullname="Spongebob Squarepants", + ... 
addresses=[Address(email_address="spongebob@sqlalchemy.org")], + ... ), + ... User( + ... name="sandy", + ... fullname="Sandy Cheeks", + ... addresses=[ + ... Address(email_address="sandy@sqlalchemy.org"), + ... Address(email_address="squirrel@squirrelpower.org"), + ... ], + ... ), + ... User( + ... name="patrick", + ... fullname="Patrick Star", + ... addresses=[Address(email_address="pat999@aol.com")], + ... ), + ... User( + ... name="squidward", + ... fullname="Squidward Tentacles", + ... addresses=[Address(email_address="stentcl@sqlalchemy.org")], + ... ), + ... User(name="ehkrabs", fullname="Eugene H. Krabs"), + ... ] + ... ) >>> session.commit() BEGIN ... >>> conn.begin() @@ -108,49 +120,49 @@ strings, dates, and many others: * :meth:`_sql.ColumnOperators.__eq__` (Python "``==``" operator):: - >>> print(column('x') == 5) + >>> print(column("x") == 5) x = :x_1 .. * :meth:`_sql.ColumnOperators.__ne__` (Python "``!=``" operator):: - >>> print(column('x') != 5) + >>> print(column("x") != 5) x != :x_1 .. * :meth:`_sql.ColumnOperators.__gt__` (Python "``>``" operator):: - >>> print(column('x') > 5) + >>> print(column("x") > 5) x > :x_1 .. * :meth:`_sql.ColumnOperators.__lt__` (Python "``<``" operator):: - >>> print(column('x') < 5) + >>> print(column("x") < 5) x < :x_1 .. * :meth:`_sql.ColumnOperators.__ge__` (Python "``>=``" operator):: - >>> print(column('x') >= 5) + >>> print(column("x") >= 5) x >= :x_1 .. * :meth:`_sql.ColumnOperators.__le__` (Python "``<=``" operator):: - >>> print(column('x') <= 5) + >>> print(column("x") <= 5) x <= :x_1 .. * :meth:`_sql.ColumnOperators.between`:: - >>> print(column('x').between(5, 10)) + >>> print(column("x").between(5, 10)) x BETWEEN :x_1 AND :x_2 .. 
@@ -171,7 +183,7 @@ IN is available most typically by passing a list of values to the :meth:`_sql.ColumnOperators.in_` method:: - >>> print(column('x').in_([1, 2, 3])) + >>> print(column("x").in_([1, 2, 3])) x IN (__[POSTCOMPILE_x_1]) The special bound form ``__[POSTCOMPILE`` is rendered into individual parameters @@ -211,12 +223,12 @@ NOT IN "NOT IN" is available via the :meth:`_sql.ColumnOperators.not_in` operator:: - >>> print(column('x').not_in([1, 2, 3])) + >>> print(column("x").not_in([1, 2, 3])) (x NOT IN (__[POSTCOMPILE_x_1])) This is typically more easily available by negating with the ``~`` operator:: - >>> print(~column('x').in_([1, 2, 3])) + >>> print(~column("x").in_([1, 2, 3])) (x NOT IN (__[POSTCOMPILE_x_1])) Tuple IN Expressions @@ -229,7 +241,7 @@ building block for tuple comparisons. The :meth:`_sql.Tuple.in_` operator then receives a list of tuples:: >>> from sqlalchemy import tuple_ - >>> tup = tuple_(column('x', Integer), column('y', Integer)) + >>> tup = tuple_(column("x", Integer), column("y", Integer)) >>> expr = tup.in_([(1, 2), (3, 4)]) >>> print(expr) (x, y) IN (__[POSTCOMPILE_param_1]) @@ -256,14 +268,14 @@ operators work with subqueries. The form provides that a :class:`_sql.Select` construct is passed in directly, without any explicit conversion to a named subquery:: - >>> print(column('x').in_(select(user_table.c.id))) + >>> print(column("x").in_(select(user_table.c.id))) x IN (SELECT user_account.id FROM user_account) Tuples work as expected:: >>> print( - ... tuple_(column('x'), column('y')).in_( + ... tuple_(column("x"), column("y")).in_( ... select(user_table.c.id, address_table.c.id).join(address_table) ... ) ... ) @@ -283,14 +295,14 @@ databases support: as " IS NULL". 
The ``NULL`` constant is most easily acquired using regular Python ``None``:: - >>> print(column('x').is_(None)) + >>> print(column("x").is_(None)) x IS NULL SQL NULL is also explicitly available, if needed, using the :func:`_sql.null` construct:: >>> from sqlalchemy import null - >>> print(column('x').is_(null())) + >>> print(column("x").is_(null())) x IS NULL The :meth:`_sql.ColumnOperators.is_` operator is automatically invoked when @@ -300,7 +312,7 @@ databases support: explicitly, paricularly when used with a dynamic value:: >>> a = None - >>> print(column('x') == a) + >>> print(column("x") == a) x IS NULL Note that the Python ``is`` operator is **not overloaded**. Even though @@ -311,26 +323,26 @@ databases support: Similar to :meth:`_sql.ColumnOperators.is_`, produces "IS NOT":: - >>> print(column('x').is_not(None)) + >>> print(column("x").is_not(None)) x IS NOT NULL Is similarly equivalent to ``!= None``:: - >>> print(column('x') != None) + >>> print(column("x") != None) x IS NOT NULL * :meth:`_sql.ColumnOperators.is_distinct_from`: Produces SQL IS DISTINCT FROM:: - >>> print(column('x').is_distinct_from('some value')) + >>> print(column("x").is_distinct_from("some value")) x IS DISTINCT FROM :x_1 * :meth:`_sql.ColumnOperators.isnot_distinct_from`: Produces SQL IS NOT DISTINCT FROM:: - >>> print(column('x').isnot_distinct_from('some value')) + >>> print(column("x").isnot_distinct_from("some value")) x IS NOT DISTINCT FROM :x_1 String Comparisons @@ -338,7 +350,7 @@ String Comparisons * :meth:`_sql.ColumnOperators.like`:: - >>> print(column('x').like('word')) + >>> print(column("x").like("word")) x LIKE :x_1 .. @@ -348,14 +360,14 @@ String Comparisons Case insensitive LIKE makes use of the SQL ``lower()`` function on a generic backend. On the PostgreSQL backend it will use ``ILIKE``:: - >>> print(column('x').ilike('word')) + >>> print(column("x").ilike("word")) lower(x) LIKE lower(:x_1) .. 
* :meth:`_sql.ColumnOperators.notlike`:: - >>> print(column('x').notlike('word')) + >>> print(column("x").notlike("word")) x NOT LIKE :x_1 .. @@ -363,7 +375,7 @@ String Comparisons * :meth:`_sql.ColumnOperators.notilike`:: - >>> print(column('x').notilike('word')) + >>> print(column("x").notilike("word")) lower(x) NOT LIKE lower(:x_1) .. @@ -378,21 +390,21 @@ backends or sometimes a function like ``concat()``: * :meth:`_sql.ColumnOperators.startswith`:: The string containment operators - >>> print(column('x').startswith('word')) + >>> print(column("x").startswith("word")) x LIKE :x_1 || '%' .. * :meth:`_sql.ColumnOperators.endswith`:: - >>> print(column('x').endswith('word')) + >>> print(column("x").endswith("word")) x LIKE '%' || :x_1 .. * :meth:`_sql.ColumnOperators.contains`:: - >>> print(column('x').contains('word')) + >>> print(column("x").contains("word")) x LIKE '%' || :x_1 || '%' .. @@ -408,7 +420,7 @@ behaviors and results on different databases: This is a dialect-specific operator that makes use of the MATCH feature of the underlying database, if available:: - >>> print(column('x').match('word')) + >>> print(column("x").match("word")) x MATCH :x_1 .. @@ -419,13 +431,13 @@ behaviors and results on different databases: for example the PostgreSQL dialect:: >>> from sqlalchemy.dialects import postgresql - >>> print(column('x').regexp_match('word').compile(dialect=postgresql.dialect())) + >>> print(column("x").regexp_match("word").compile(dialect=postgresql.dialect())) x ~ %(x_1)s Or MySQL:: >>> from sqlalchemy.dialects import mysql - >>> print(column('x').regexp_match('word').compile(dialect=mysql.dialect())) + >>> print(column("x").regexp_match("word").compile(dialect=mysql.dialect())) x REGEXP %s .. 
@@ -440,20 +452,20 @@ String Alteration String concatenation:: - >>> print(column('x').concat("some string")) + >>> print(column("x").concat("some string")) x || :x_1 This operator is available via :meth:`_sql.ColumnOperators.__add__`, that is, the Python ``+`` operator, when working with a column expression that derives from :class:`_types.String`:: - >>> print(column('x', String) + "some string") + >>> print(column("x", String) + "some string") x || :x_1 The operator will produce the appropriate database-specific construct, such as on MySQL it's historically been the ``concat()`` SQL function:: - >>> print((column('x', String) + "some string").compile(dialect=mysql.dialect())) + >>> print((column("x", String) + "some string").compile(dialect=mysql.dialect())) concat(x, %s) .. @@ -463,7 +475,7 @@ String Alteration Complementary to :meth:`_sql.ColumnOperators.regexp` this produces REGEXP REPLACE equivalent for the backends which support it:: - >>> print(column('x').regexp_replace('foo', 'bar').compile(dialect=postgresql.dialect())) + >>> print(column("x").regexp_replace("foo", "bar").compile(dialect=postgresql.dialect())) REGEXP_REPLACE(x, %(x_1)s, %(x_2)s) .. @@ -473,7 +485,11 @@ String Alteration Produces the COLLATE SQL operator which provides for specific collations at expression time:: - >>> print((column('x').collate('latin1_german2_ci') == 'Müller').compile(dialect=mysql.dialect())) + >>> print( + ... (column("x").collate("latin1_german2_ci") == "Müller").compile( + ... dialect=mysql.dialect() + ... ) + ... ) (x COLLATE latin1_german2_ci) = %s @@ -481,7 +497,11 @@ String Alteration >>> from sqlalchemy import literal - >>> print((literal('Müller').collate('latin1_german2_ci') == column('x')).compile(dialect=mysql.dialect())) + >>> print( + ... (literal("Müller").collate("latin1_german2_ci") == column("x")).compile( + ... dialect=mysql.dialect() + ... ) + ... ) (%s COLLATE latin1_german2_ci) = x .. 
@@ -491,10 +511,10 @@ Arithmetic Operators * :meth:`_sql.ColumnOperators.__add__`, :meth:`_sql.ColumnOperators.__radd__` (Python "``+``" operator):: - >>> print(column('x') + 5) + >>> print(column("x") + 5) x + :x_1 - >>> print(5 + column('x')) + >>> print(5 + column("x")) :x_1 + x .. @@ -507,10 +527,10 @@ Arithmetic Operators * :meth:`_sql.ColumnOperators.__sub__`, :meth:`_sql.ColumnOperators.__rsub__` (Python "``-``" operator):: - >>> print(column('x') - 5) + >>> print(column("x") - 5) x - :x_1 - >>> print(5 - column('x')) + >>> print(5 - column("x")) :x_1 - x .. @@ -518,19 +538,19 @@ Arithmetic Operators * :meth:`_sql.ColumnOperators.__mul__`, :meth:`_sql.ColumnOperators.__rmul__` (Python "``*``" operator):: - >>> print(column('x') * 5) + >>> print(column("x") * 5) x * :x_1 - >>> print(5 * column('x')) + >>> print(5 * column("x")) :x_1 * x .. * :meth:`_sql.ColumnOperators.__div__`, :meth:`_sql.ColumnOperators.__rdiv__` (Python "``/``" operator):: - >>> print(column('x') / 5) + >>> print(column("x") / 5) x / :x_1 - >>> print(5 / column('x')) + >>> print(5 / column("x")) :x_1 / x .. @@ -538,9 +558,9 @@ Arithmetic Operators * :meth:`_sql.ColumnOperators.__mod__`, :meth:`_sql.ColumnOperators.__rmod__` (Python "``%``" operator):: - >>> print(column('x') % 5) + >>> print(column("x") % 5) x % :x_1 - >>> print(5 % column('x')) + >>> print(5 % column("x")) :x_1 % x .. @@ -553,10 +573,10 @@ The most common conjunction, "AND", is automatically applied if we make repeated :meth:`_sql.Update.where` and :meth:`_sql.Delete.where`:: >>> print( - ... select(address_table.c.email_address). - ... where(user_table.c.name == 'squidward'). - ... where(address_table.c.user_id == user_table.c.id) - ... ) + ... select(address_table.c.email_address) + ... .where(user_table.c.name == "squidward") + ... .where(address_table.c.user_id == user_table.c.id) + ... 
) SELECT address.email_address FROM address, user_account WHERE user_account.name = :name_1 AND address.user_id = user_account.id @@ -564,12 +584,10 @@ The most common conjunction, "AND", is automatically applied if we make repeated :meth:`_sql.Select.where`, :meth:`_sql.Update.where` and :meth:`_sql.Delete.where` also accept multiple expressions with the same effect:: >>> print( - ... select(address_table.c.email_address). - ... where( - ... user_table.c.name == 'squidward', - ... address_table.c.user_id == user_table.c.id - ... ) - ... ) + ... select(address_table.c.email_address).where( + ... user_table.c.name == "squidward", address_table.c.user_id == user_table.c.id + ... ) + ... ) SELECT address.email_address FROM address, user_account WHERE user_account.name = :name_1 AND address.user_id = user_account.id @@ -579,11 +597,10 @@ The "AND" conjunction, as well as its partner "OR", are both available directly >>> from sqlalchemy import and_, or_ >>> print( - ... select(address_table.c.email_address). - ... where( + ... select(address_table.c.email_address).where( ... and_( - ... or_(user_table.c.name == 'squidward', user_table.c.name == 'sandy'), - ... address_table.c.user_id == user_table.c.id + ... or_(user_table.c.name == "squidward", user_table.c.name == "sandy"), + ... address_table.c.user_id == user_table.c.id, ... ) ... ) ... ) @@ -596,13 +613,13 @@ A negation is available using the :func:`_sql.not_` function. 
This will typically invert the operator in a boolean expression:: >>> from sqlalchemy import not_ - >>> print(not_(column('x') == 5)) + >>> print(not_(column("x") == 5)) x != :x_1 It also may apply a keyword such as ``NOT`` when appropriate:: >>> from sqlalchemy import Boolean - >>> print(not_(column('x', Boolean))) + >>> print(not_(column("x", Boolean))) NOT x @@ -622,7 +639,7 @@ The above conjunction functions :func:`_sql.and_`, :func:`_sql.or_`, The Python binary ``&`` operator is overloaded to behave the same as :func:`_sql.and_` (note parenthesis around the two operands):: - >>> print((column('x') == 5) & (column('y') == 10)) + >>> print((column("x") == 5) & (column("y") == 10)) x = :x_1 AND y = :y_1 .. @@ -633,7 +650,7 @@ The above conjunction functions :func:`_sql.and_`, :func:`_sql.or_`, The Python binary ``|`` operator is overloaded to behave the same as :func:`_sql.or_` (note parenthesis around the two operands):: - >>> print((column('x') == 5) | (column('y') == 10)) + >>> print((column("x") == 5) | (column("y") == 10)) x = :x_1 OR y = :y_1 .. @@ -645,11 +662,11 @@ The above conjunction functions :func:`_sql.and_`, :func:`_sql.or_`, as :func:`_sql.not_`, either inverting the existing operator, or applying the ``NOT`` keyword to the expression as a whole:: - >>> print(~(column('x') == 5)) + >>> print(~(column("x") == 5)) x != :x_1 >>> from sqlalchemy import Boolean - >>> print(~column('x', Boolean)) + >>> print(~column("x", Boolean)) NOT x .. @@ -665,4 +682,4 @@ TODO .. Setup code, not for display >>> conn.close() - ROLLBACK \ No newline at end of file + ROLLBACK diff --git a/doc/build/core/pooling.rst b/doc/build/core/pooling.rst index 59223ee7aaa..b8800ead4af 100644 --- a/doc/build/core/pooling.rst +++ b/doc/build/core/pooling.rst @@ -35,8 +35,7 @@ directly to :func:`~sqlalchemy.create_engine` as keyword arguments: ``pool_size``, ``max_overflow``, ``pool_recycle`` and ``pool_timeout``. 
For example:: - engine = create_engine('postgresql://me@localhost/mydb', - pool_size=20, max_overflow=0) + engine = create_engine("postgresql://me@localhost/mydb", pool_size=20, max_overflow=0) In the case of SQLite, the :class:`.SingletonThreadPool` or :class:`.NullPool` are selected by the dialect to provide @@ -68,14 +67,16 @@ of building the pool for you. Common options include specifying :class:`.QueuePool` with SQLite:: from sqlalchemy.pool import QueuePool - engine = create_engine('sqlite:///file.db', poolclass=QueuePool) + + engine = create_engine("sqlite:///file.db", poolclass=QueuePool) Disabling pooling using :class:`.NullPool`:: from sqlalchemy.pool import NullPool + engine = create_engine( - 'postgresql+psycopg2://scott:tiger@localhost/test', - poolclass=NullPool) + "postgresql+psycopg2://scott:tiger@localhost/test", poolclass=NullPool + ) Using a Custom Connection Function ---------------------------------- @@ -95,10 +96,12 @@ by any additional options:: import sqlalchemy.pool as pool import psycopg2 + def getconn(): - c = psycopg2.connect(user='ed', host='127.0.0.1', dbname='test') + c = psycopg2.connect(user="ed", host="127.0.0.1", dbname="test") return c + mypool = pool.QueuePool(getconn, max_overflow=10, pool_size=5) DBAPI connections can then be procured from the pool using the @@ -263,6 +266,7 @@ behaviors are needed:: some_engine = create_engine(...) + @event.listens_for(some_engine, "engine_connect") def ping_connection(connection, branch): if branch: @@ -327,6 +331,7 @@ that they are replaced with new ones upon next checkout. This flow is illustrated by the code example below:: from sqlalchemy import create_engine, exc + e = create_engine(...) 
c = e.connect() @@ -365,6 +370,7 @@ such as MySQL that automatically close connections that have been stale after a period of time:: from sqlalchemy import create_engine + e = create_engine("mysql://scott:tiger@localhost/test", pool_recycle=3600) Above, any DBAPI connection that has been open for more than one hour will be invalidated and replaced, @@ -433,8 +439,7 @@ close these connections out. The difference between FIFO and LIFO is basically whether or not its desirable for the pool to keep a full set of connections ready to go even during idle periods:: - engine = create_engine( - "postgreql://", pool_use_lifo=True, pool_pre_ping=True) + engine = create_engine("postgreql://", pool_use_lifo=True, pool_pre_ping=True) Above, we also make use of the :paramref:`_sa.create_engine.pool_pre_ping` flag so that connections which are closed from the server side are gracefully @@ -476,8 +481,8 @@ are three general approaches to this: more than once:: from sqlalchemy.pool import NullPool - engine = create_engine("mysql://user:pass@host/dbname", poolclass=NullPool) + engine = create_engine("mysql://user:pass@host/dbname", poolclass=NullPool) 2. Call :meth:`_engine.Engine.dispose` on any given :class:`_engine.Engine`, passing the :paramref:`.Engine.dispose.close` parameter with a value of @@ -490,19 +495,21 @@ are three general approaches to this: engine = create_engine("mysql+mysqldb://user:pass@host/dbname") + def run_in_process(some_data_record): with engine.connect() as conn: conn.execute(text("...")) + def initializer(): """ensure the parent proc's database connections are not touched - in the new connection pool""" + in the new connection pool""" engine.dispose(close=False) + with Pool(10, initializer=initializer) as p: p.map(run_in_process, data) - .. 
versionadded:: 1.4.33 Added the :paramref:`.Engine.dispose.close` parameter to allow the replacement of a connection pool in a child process without interfering with the connections used by the parent @@ -527,10 +534,12 @@ are three general approaches to this: engine = create_engine("mysql://user:pass@host/dbname") + def run_in_process(): with engine.connect() as conn: conn.execute(text("...")) + # before process starts, ensure engine.dispose() is called engine.dispose() p = Process(target=run_in_process) @@ -545,19 +554,20 @@ are three general approaches to this: engine = create_engine("...") + @event.listens_for(engine, "connect") def connect(dbapi_connection, connection_record): - connection_record.info['pid'] = os.getpid() + connection_record.info["pid"] = os.getpid() + @event.listens_for(engine, "checkout") def checkout(dbapi_connection, connection_record, connection_proxy): pid = os.getpid() - if connection_record.info['pid'] != pid: + if connection_record.info["pid"] != pid: connection_record.dbapi_connection = connection_proxy.dbapi_connection = None raise exc.DisconnectionError( - "Connection record belongs to pid %s, " - "attempting to check out in pid %s" % - (connection_record.info['pid'], pid) + "Connection record belongs to pid %s, " + "attempting to check out in pid %s" % (connection_record.info["pid"], pid) ) Above, we use an approach similar to that described in diff --git a/doc/build/core/reflection.rst b/doc/build/core/reflection.rst index d9547344e7b..8c31b7ff000 100644 --- a/doc/build/core/reflection.rst +++ b/doc/build/core/reflection.rst @@ -13,7 +13,7 @@ existing within the database. This process is called *reflection*. 
In the most simple case you need only specify the table name, a :class:`~sqlalchemy.schema.MetaData` object, and the ``autoload_with`` argument:: - >>> messages = Table('messages', metadata_obj, autoload_with=engine) + >>> messages = Table("messages", metadata_obj, autoload_with=engine) >>> [c.name for c in messages.columns] ['message_id', 'message_name', 'date'] @@ -30,7 +30,7 @@ Below, assume the table ``shopping_cart_items`` references a table named ``shopping_carts``. Reflecting the ``shopping_cart_items`` table has the effect such that the ``shopping_carts`` table will also be loaded:: - >>> shopping_cart_items = Table('shopping_cart_items', metadata_obj, autoload_with=engine) + >>> shopping_cart_items = Table("shopping_cart_items", metadata_obj, autoload_with=engine) >>> 'shopping_carts' in metadata_obj.tables: True @@ -43,7 +43,7 @@ you the already-existing :class:`~sqlalchemy.schema.Table` object if one already exists with the given name. Such as below, we can access the already generated ``shopping_carts`` table just by naming it:: - shopping_carts = Table('shopping_carts', metadata_obj) + shopping_carts = Table("shopping_carts", metadata_obj) Of course, it's a good idea to use ``autoload_with=engine`` with the above table regardless. This is so that the table's attributes will be loaded if they have @@ -61,11 +61,16 @@ Individual columns can be overridden with explicit values when reflecting tables; this is handy for specifying custom datatypes, constraints such as primary keys that may not be configured within the database, etc.:: - >>> mytable = Table('mytable', metadata_obj, - ... Column('id', Integer, primary_key=True), # override reflected 'id' to have primary key - ... Column('mydata', Unicode(50)), # override reflected 'mydata' to be Unicode - ... # additional Column objects which require no change are reflected normally - ... autoload_with=some_engine) + >>> mytable = Table( + ... "mytable", + ... metadata_obj, + ... Column( + ... 
"id", Integer, primary_key=True + ... ), # override reflected 'id' to have primary key + ... Column("mydata", Unicode(50)), # override reflected 'mydata' to be Unicode + ... # additional Column objects which require no change are reflected normally + ... autoload_with=some_engine, + ... ) .. seealso:: @@ -92,10 +97,12 @@ extrapolate these constraints. Use the "override" technique for this, specifying explicitly those columns which are part of the primary key or have foreign key constraints:: - my_view = Table("some_view", metadata, - Column("view_id", Integer, primary_key=True), - Column("related_thing", Integer, ForeignKey("othertable.thing_id")), - autoload_with=engine + my_view = Table( + "some_view", + metadata, + Column("view_id", Integer, primary_key=True), + Column("related_thing", Integer, ForeignKey("othertable.thing_id")), + autoload_with=engine, ) Reflecting All Tables at Once @@ -109,8 +116,8 @@ object's dictionary of tables:: metadata_obj = MetaData() metadata_obj.reflect(bind=someengine) - users_table = metadata_obj.tables['users'] - addresses_table = metadata_obj.tables['addresses'] + users_table = metadata_obj.tables["users"] + addresses_table = metadata_obj.tables["addresses"] ``metadata.reflect()`` also provides a handy way to clear or delete all the rows in a database:: @@ -149,7 +156,7 @@ The end result is that :class:`_schema.Table` objects from the "project" schema will be reflected, and they will be populated as schema-qualified with that name:: - >>> metadata_obj.tables['project.messages'] + >>> metadata_obj.tables["project.messages"] Table('messages', MetaData(), Column('message_id', INTEGER(), table=), schema='project') Similarly, an individual :class:`_schema.Table` object that includes the @@ -157,7 +164,7 @@ Similarly, an individual :class:`_schema.Table` object that includes the database schema, overriding any default schema that may have been configured on the owning :class:`_schema.MetaData` collection:: - >>> messages = 
Table('messages', metadata_obj, schema="project", autoload_with=someengine) + >>> messages = Table("messages", metadata_obj, schema="project", autoload_with=someengine) >>> messages Table('messages', MetaData(), Column('message_id', INTEGER(), table=), schema='project') @@ -246,7 +253,9 @@ semantically equivalent:: >>> # reflect in non-schema qualified fashion >>> messages_table_1 = Table("messages", metadata_obj, autoload_with=someengine) >>> # reflect in schema qualified fashion - >>> messages_table_2 = Table("messages", metadata_obj, schema="project", autoload_with=someengine) + >>> messages_table_2 = Table( + ... "messages", metadata_obj, schema="project", autoload_with=someengine + ... ) >>> # two different objects >>> messages_table_1 is messages_table_2 False @@ -280,7 +289,9 @@ fashion then loads a related table that will also be performed in a schema qualified fashion:: >>> # reflect "messages" in a schema qualified fashion - >>> messages_table_1 = Table("messages", metadata_obj, schema="project", autoload_with=someengine) + >>> messages_table_1 = Table( + ... "messages", metadata_obj, schema="project", autoload_with=someengine + ... ) The above ``messages_table_1`` will refer to ``projects`` also in a schema qualified fashion. This "projects" table will be reflected automatically by @@ -343,7 +354,8 @@ database is also available. This is known as the "Inspector":: from sqlalchemy import create_engine from sqlalchemy import inspect - engine = create_engine('...') + + engine = create_engine("...") insp = inspect(engine) print(insp.get_table_names()) diff --git a/doc/build/core/tutorial.rst b/doc/build/core/tutorial.rst index 7a91e39a3f7..e0b3e179fd0 100644 --- a/doc/build/core/tutorial.rst +++ b/doc/build/core/tutorial.rst @@ -97,7 +97,7 @@ anywhere. To connect we use :func:`~sqlalchemy.create_engine`: .. 
sourcecode:: pycon+sql >>> from sqlalchemy import create_engine - >>> engine = create_engine('sqlite:///:memory:', echo=True) + >>> engine = create_engine("sqlite:///:memory:", echo=True) The ``echo`` flag is a shortcut to setting up SQLAlchemy logging, which is accomplished via Python's standard ``logging`` module. With it enabled, we'll @@ -154,17 +154,21 @@ addresses" for each row in the "users" table: >>> from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey >>> metadata_obj = MetaData() - >>> users = Table('users', metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String), - ... Column('fullname', String), + >>> users = Table( + ... "users", + ... metadata_obj, + ... Column("id", Integer, primary_key=True), + ... Column("name", String), + ... Column("fullname", String), ... ) - >>> addresses = Table('addresses', metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('user_id', None, ForeignKey('users.id')), - ... Column('email_address', String, nullable=False) - ... ) + >>> addresses = Table( + ... "addresses", + ... metadata_obj, + ... Column("id", Integer, primary_key=True), + ... Column("user_id", None, ForeignKey("users.id")), + ... Column("email_address", String, nullable=False), + ... ) All about how to define :class:`~sqlalchemy.schema.Table` objects, as well as how to create them from an existing database automatically, is described in @@ -206,7 +210,7 @@ each table first before creating, so it's safe to call multiple times: issue CREATE TABLE, a "length" may be provided to the :class:`~sqlalchemy.types.String` type as below:: - Column('name', String(50)) + Column("name", String(50)) The length field on :class:`~sqlalchemy.types.String`, as well as similar precision/scale fields available on :class:`~sqlalchemy.types.Integer`, :class:`~sqlalchemy.types.Numeric`, etc. 
are not referenced by @@ -217,15 +221,18 @@ each table first before creating, so it's safe to call multiple times: without being instructed. For that, you use the :class:`~sqlalchemy.schema.Sequence` construct:: from sqlalchemy import Sequence - Column('id', Integer, Sequence('user_id_seq'), primary_key=True) + + Column("id", Integer, Sequence("user_id_seq"), primary_key=True) A full, foolproof :class:`~sqlalchemy.schema.Table` is therefore:: - users = Table('users', metadata_obj, - Column('id', Integer, Sequence('user_id_seq'), primary_key=True), - Column('name', String(50)), - Column('fullname', String(50)), - Column('nickname', String(50)) + users = Table( + "users", + metadata_obj, + Column("id", Integer, Sequence("user_id_seq"), primary_key=True), + Column("name", String(50)), + Column("fullname", String(50)), + Column("nickname", String(50)), ) We include this more verbose :class:`_schema.Table` construct separately @@ -255,7 +262,7 @@ Notice above that the INSERT statement names every column in the ``users`` table. This can be limited by using the ``values()`` method, which establishes the VALUES clause of the INSERT explicitly:: - >>> ins = users.insert().values(name='jack', fullname='Jack Jones') + >>> ins = users.insert().values(name="jack", fullname="Jack Jones") >>> str(ins) 'INSERT INTO users (name, fullname) VALUES (:name, :fullname)' @@ -351,7 +358,7 @@ and use it in the "normal" way: .. sourcecode:: pycon+sql >>> ins = users.insert() - >>> conn.execute(ins, {"id": 2, "name":"wendy", "fullname": "Wendy Williams"}) + >>> conn.execute(ins, {"id": 2, "name": "wendy", "fullname": "Wendy Williams"}) {opensql}INSERT INTO users (id, name, fullname) VALUES (?, ?, ?) [...] (2, 'wendy', 'Wendy Williams') COMMIT @@ -370,12 +377,15 @@ inserted, as we do here to add some email addresses: .. sourcecode:: pycon+sql - >>> conn.execute(addresses.insert(), [ - ... {'user_id': 1, 'email_address' : 'jack@yahoo.com'}, - ... 
{'user_id': 1, 'email_address' : 'jack@msn.com'}, - ... {'user_id': 2, 'email_address' : 'www@www.org'}, - ... {'user_id': 2, 'email_address' : 'wendy@aol.com'}, - ... ]) + >>> conn.execute( + ... addresses.insert(), + ... [ + ... {"user_id": 1, "email_address": "jack@yahoo.com"}, + ... {"user_id": 1, "email_address": "jack@msn.com"}, + ... {"user_id": 2, "email_address": "www@www.org"}, + ... {"user_id": 2, "email_address": "wendy@aol.com"}, + ... ], + ... ) {opensql}INSERT INTO addresses (user_id, email_address) VALUES (?, ?) [...] ((1, 'jack@yahoo.com'), (1, 'jack@msn.com'), (2, 'www@www.org'), (2, 'wendy@aol.com')) COMMIT @@ -484,7 +494,7 @@ programmatically generated, or contains non-ascii characters, the [...] () {stop}>>> row = result.fetchone() - >>> print("name:", row._mapping['name'], "; fullname:", row._mapping['fullname']) + >>> print("name:", row._mapping["name"], "; fullname:", row._mapping["fullname"]) name: jack ; fullname: Jack Jones .. deprecated:: 1.4 @@ -522,7 +532,12 @@ collection: .. sourcecode:: pycon+sql {sql}>>> for row in conn.execute(s): - ... print("name:", row._mapping[users.c.name], "; fullname:", row._mapping[users.c.fullname]) + ... print( + ... "name:", + ... row._mapping[users.c.name], + ... "; fullname:", + ... row._mapping[users.c.fullname], + ... ) SELECT users.id, users.name, users.fullname FROM users [...] () @@ -681,7 +696,7 @@ equals, not equals, etc.: users.name IS NULL >>> # reverse works too - >>> print('fred' > users.c.name) + >>> print("fred" > users.c.name) users.name < :name_1 If we add two integer columns together, we get an addition expression: @@ -707,8 +722,9 @@ not all of them. MySQL users, fear not: .. sourcecode:: pycon+sql - >>> print((users.c.name + users.c.fullname). - ... compile(bind=create_engine('mysql://'))) # doctest: +SKIP + >>> print( + ... (users.c.name + users.c.fullname).compile(bind=create_engine("mysql://")) + ... 
) # doctest: +SKIP concat(users.name, users.fullname) The above illustrates the SQL that's generated for an @@ -720,12 +736,12 @@ always use the :meth:`.Operators.op` method; this generates whatever operator yo .. sourcecode:: pycon+sql - >>> print(users.c.name.op('tiddlywinks')('foo')) + >>> print(users.c.name.op("tiddlywinks")("foo")) users.name tiddlywinks :name_1 This function can also be used to make bitwise operators explicit. For example:: - somecolumn.op('&')(0xff) + somecolumn.op("&")(0xFF) is a bitwise AND of the value in ``somecolumn``. @@ -735,15 +751,14 @@ column. For this case, be sure to make the type explicit, if not what's normally expected, using :func:`.type_coerce`:: from sqlalchemy import type_coerce - expr = type_coerce(somecolumn.op('-%>')('foo'), MySpecialType()) - stmt = select(expr) + expr = type_coerce(somecolumn.op("-%>")("foo"), MySpecialType()) + stmt = select(expr) For boolean operators, use the :meth:`.Operators.bool_op` method, which will ensure that the return type of the expression is handled as boolean:: - somecolumn.bool_op('-->')('some value') - + somecolumn.bool_op("-->")("some value") Commonly Used Operators ------------------------- @@ -760,11 +775,11 @@ objects is at :class:`.ColumnOperators`. * :meth:`equals <.ColumnOperators.__eq__>`:: - statement.where(users.c.name == 'ed') + statement.where(users.c.name == "ed") * :meth:`not equals <.ColumnOperators.__ne__>`:: - statement.where(users.c.name != 'ed') + statement.where(users.c.name != "ed") * :meth:`LIKE <.ColumnOperators.like>`:: @@ -785,23 +800,25 @@ objects is at :class:`.ColumnOperators`. 
* :meth:`IN <.ColumnOperators.in_>`:: - statement.where(users.c.name.in_(['ed', 'wendy', 'jack'])) + statement.where(users.c.name.in_(["ed", "wendy", "jack"])) # works with Select objects too: - statement.where.filter(users.c.name.in_( - select(users.c.name).where(users.c.name.like('%ed%')) - )) + statement.where.filter( + users.c.name.in_(select(users.c.name).where(users.c.name.like("%ed%"))) + ) # use tuple_() for composite (multi-column) queries from sqlalchemy import tuple_ + statement.where( - tuple_(users.c.name, users.c.nickname).\ - in_([('ed', 'edsnickname'), ('wendy', 'windy')]) + tuple_(users.c.name, users.c.nickname).in_( + [("ed", "edsnickname"), ("wendy", "windy")] + ) ) * :meth:`NOT IN <.ColumnOperators.not_in>`:: - statement.where(~users.c.name.in_(['ed', 'wendy', 'jack'])) + statement.where(~users.c.name.in_(["ed", "wendy", "jack"])) * :meth:`IS NULL <.ColumnOperators.is_>`:: @@ -878,16 +895,17 @@ a :meth:`~.ColumnOperators.like`): .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import and_, or_, not_ - >>> print(and_( - ... users.c.name.like('j%'), + >>> print( + ... and_( + ... users.c.name.like("j%"), ... users.c.id == addresses.c.user_id, ... or_( - ... addresses.c.email_address == 'wendy@aol.com', - ... addresses.c.email_address == 'jack@yahoo.com' + ... addresses.c.email_address == "wendy@aol.com", + ... addresses.c.email_address == "jack@yahoo.com", ... ), - ... not_(users.c.id > 5) - ... ) - ... ) + ... not_(users.c.id > 5), + ... ) + ... ) users.name LIKE :name_1 AND users.id = addresses.user_id AND (addresses.email_address = :email_address_1 OR addresses.email_address = :email_address_2) @@ -899,12 +917,14 @@ parenthesis: .. sourcecode:: pycon+sql - >>> print(users.c.name.like('j%') & (users.c.id == addresses.c.user_id) & - ... ( - ... (addresses.c.email_address == 'wendy@aol.com') | \ - ... (addresses.c.email_address == 'jack@yahoo.com') - ... ) \ - ... & ~(users.c.id>5) + >>> print( + ... users.c.name.like("j%") + ... 
& (users.c.id == addresses.c.user_id) + ... & ( + ... (addresses.c.email_address == "wendy@aol.com") + ... | (addresses.c.email_address == "jack@yahoo.com") + ... ) + ... & ~(users.c.id > 5) ... ) users.name LIKE :name_1 AND users.id = addresses.user_id AND (addresses.email_address = :email_address_1 @@ -923,19 +943,16 @@ not have a name: .. sourcecode:: pycon+sql - >>> s = select((users.c.fullname + - ... ", " + addresses.c.email_address). - ... label('title')).\ - ... where( - ... and_( - ... users.c.id == addresses.c.user_id, - ... users.c.name.between('m', 'z'), - ... or_( - ... addresses.c.email_address.like('%@aol.com'), - ... addresses.c.email_address.like('%@msn.com') - ... ) - ... ) - ... ) + >>> s = select((users.c.fullname + ", " + addresses.c.email_address).label("title")).where( + ... and_( + ... users.c.id == addresses.c.user_id, + ... users.c.name.between("m", "z"), + ... or_( + ... addresses.c.email_address.like("%@aol.com"), + ... addresses.c.email_address.like("%@msn.com"), + ... ), + ... ) + ... ) >>> conn.execute(s).fetchall() {opensql}SELECT users.fullname || ? || addresses.email_address AS title FROM users, addresses @@ -954,17 +971,17 @@ A shortcut to using :func:`.and_` is to chain together multiple .. sourcecode:: pycon+sql - >>> s = select((users.c.fullname + - ... ", " + addresses.c.email_address). - ... label('title')).\ - ... where(users.c.id == addresses.c.user_id).\ - ... where(users.c.name.between('m', 'z')).\ - ... where( - ... or_( - ... addresses.c.email_address.like('%@aol.com'), - ... addresses.c.email_address.like('%@msn.com') - ... ) - ... ) + >>> s = ( + ... select((users.c.fullname + ", " + addresses.c.email_address).label("title")) + ... .where(users.c.id == addresses.c.user_id) + ... .where(users.c.name.between("m", "z")) + ... .where( + ... or_( + ... addresses.c.email_address.like("%@aol.com"), + ... addresses.c.email_address.like("%@msn.com"), + ... ) + ... ) + ... 
) >>> conn.execute(s).fetchall() {opensql}SELECT users.fullname || ? || addresses.email_address AS title FROM users, addresses @@ -995,12 +1012,13 @@ unchanged. Below, we create a :func:`_expression.text` object and execute it: >>> from sqlalchemy.sql import text >>> s = text( ... "SELECT users.fullname || ', ' || addresses.email_address AS title " - ... "FROM users, addresses " - ... "WHERE users.id = addresses.user_id " - ... "AND users.name BETWEEN :x AND :y " - ... "AND (addresses.email_address LIKE :e1 " - ... "OR addresses.email_address LIKE :e2)") - >>> conn.execute(s, {"x":"m", "y":"z", "e1":"%@aol.com", "e2":"%@msn.com"}).fetchall() + ... "FROM users, addresses " + ... "WHERE users.id = addresses.user_id " + ... "AND users.name BETWEEN :x AND :y " + ... "AND (addresses.email_address LIKE :e1 " + ... "OR addresses.email_address LIKE :e2)" + ... ) + >>> conn.execute(s, {"x": "m", "y": "z", "e1": "%@aol.com", "e2": "%@msn.com"}).fetchall() {opensql}SELECT users.fullname || ', ' || addresses.email_address AS title FROM users, addresses WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND @@ -1060,8 +1078,7 @@ When we call the :meth:`_expression.TextClause.columns` method, we get back a j = stmt.join(addresses, stmt.c.id == addresses.c.user_id) - new_stmt = select(stmt.c.id, addresses.c.id).\ - select_from(j).where(stmt.c.name == 'x') + new_stmt = select(stmt.c.id, addresses.c.id).select_from(j).where(stmt.c.name == "x") The positional form of :meth:`_expression.TextClause.columns` is particularly useful when relating textual SQL to existing Core or ORM models, because we can use @@ -1070,16 +1087,18 @@ result column names in the textual SQL: .. sourcecode:: pycon+sql - >>> stmt = text("SELECT users.id, addresses.id, users.id, " + >>> stmt = text( + ... "SELECT users.id, addresses.id, users.id, " ... "users.name, addresses.email_address AS email " ... "FROM users JOIN addresses ON users.id=addresses.user_id " - ... 
"WHERE users.id = 1").columns( - ... users.c.id, - ... addresses.c.id, - ... addresses.c.user_id, - ... users.c.name, - ... addresses.c.email_address - ... ) + ... "WHERE users.id = 1" + ... ).columns( + ... users.c.id, + ... addresses.c.id, + ... addresses.c.user_id, + ... users.c.name, + ... addresses.c.email_address, + ... ) >>> result = conn.execute(stmt) {opensql}SELECT users.id, addresses.id, users.id, users.name, addresses.email_address AS email @@ -1143,18 +1162,20 @@ need to refer to any pre-established :class:`_schema.Table` metadata: .. sourcecode:: pycon+sql - >>> s = select( - ... text("users.fullname || ', ' || addresses.email_address AS title") - ... ).\ - ... where( - ... and_( - ... text("users.id = addresses.user_id"), - ... text("users.name BETWEEN 'm' AND 'z'"), - ... text( - ... "(addresses.email_address LIKE :x " - ... "OR addresses.email_address LIKE :y)") - ... ) - ... ).select_from(text('users, addresses')) + >>> s = ( + ... select(text("users.fullname || ', ' || addresses.email_address AS title")) + ... .where( + ... and_( + ... text("users.id = addresses.user_id"), + ... text("users.name BETWEEN 'm' AND 'z'"), + ... text( + ... "(addresses.email_address LIKE :x " + ... "OR addresses.email_address LIKE :y)" + ... ), + ... ) + ... ) + ... .select_from(text("users, addresses")) + ... ) >>> conn.execute(s, {"x": "%@aol.com", "y": "%@msn.com"}).fetchall() {opensql}SELECT users.fullname || ', ' || addresses.email_address AS title FROM users, addresses @@ -1197,22 +1218,27 @@ be quoted: >>> from sqlalchemy import select, and_, text, String >>> from sqlalchemy.sql import table, literal_column - >>> s = select( - ... literal_column("users.fullname", String) + - ... ', ' + - ... literal_column("addresses.email_address").label("title") - ... ).\ - ... where( - ... and_( - ... literal_column("users.id") == literal_column("addresses.user_id"), - ... text("users.name BETWEEN 'm' AND 'z'"), - ... text( - ... "(addresses.email_address LIKE :x OR " - ... 
"addresses.email_address LIKE :y)") - ... ) - ... ).select_from(table('users')).select_from(table('addresses')) - - >>> conn.execute(s, {"x":"%@aol.com", "y":"%@msn.com"}).fetchall() + >>> s = ( + ... select( + ... literal_column("users.fullname", String) + ... + ", " + ... + literal_column("addresses.email_address").label("title") + ... ) + ... .where( + ... and_( + ... literal_column("users.id") == literal_column("addresses.user_id"), + ... text("users.name BETWEEN 'm' AND 'z'"), + ... text( + ... "(addresses.email_address LIKE :x OR " + ... "addresses.email_address LIKE :y)" + ... ), + ... ) + ... ) + ... .select_from(table("users")) + ... .select_from(table("addresses")) + ... ) + + >>> conn.execute(s, {"x": "%@aol.com", "y": "%@msn.com"}).fetchall() {opensql}SELECT users.fullname || ? || addresses.email_address AS anon_1 FROM users, addresses WHERE users.id = addresses.user_id @@ -1239,10 +1265,11 @@ are rendered fully: .. sourcecode:: pycon+sql >>> from sqlalchemy import func - >>> stmt = select( - ... addresses.c.user_id, - ... func.count(addresses.c.id).label('num_addresses')).\ - ... group_by("user_id").order_by("user_id", "num_addresses") + >>> stmt = ( + ... select(addresses.c.user_id, func.count(addresses.c.id).label("num_addresses")) + ... .group_by("user_id") + ... .order_by("user_id", "num_addresses") + ... ) {sql}>>> conn.execute(stmt).fetchall() SELECT addresses.user_id, count(addresses.id) AS num_addresses @@ -1256,10 +1283,11 @@ name: .. sourcecode:: pycon+sql >>> from sqlalchemy import func, desc - >>> stmt = select( - ... addresses.c.user_id, - ... func.count(addresses.c.id).label('num_addresses')).\ - ... group_by("user_id").order_by("user_id", desc("num_addresses")) + >>> stmt = ( + ... select(addresses.c.user_id, func.count(addresses.c.id).label("num_addresses")) + ... .group_by("user_id") + ... .order_by("user_id", desc("num_addresses")) + ... 
) {sql}>>> conn.execute(stmt).fetchall() SELECT addresses.user_id, count(addresses.id) AS num_addresses @@ -1278,9 +1306,9 @@ by a column name that appears more than once: .. sourcecode:: pycon+sql >>> u1a, u1b = users.alias(), users.alias() - >>> stmt = select(u1a, u1b).\ - ... where(u1a.c.name > u1b.c.name).\ - ... order_by(u1a.c.name) # using "name" here would be ambiguous + >>> stmt = ( + ... select(u1a, u1b).where(u1a.c.name > u1b.c.name).order_by(u1a.c.name) + ... ) # using "name" here would be ambiguous {sql}>>> conn.execute(stmt).fetchall() SELECT users_1.id, users_1.name, users_1.fullname, users_2.id AS id_1, @@ -1325,13 +1353,14 @@ once for each address. We create two :class:`_expression.Alias` constructs aga >>> a1 = addresses.alias() >>> a2 = addresses.alias() - >>> s = select(users).\ - ... where(and_( - ... users.c.id == a1.c.user_id, - ... users.c.id == a2.c.user_id, - ... a1.c.email_address == 'jack@msn.com', - ... a2.c.email_address == 'jack@yahoo.com' - ... )) + >>> s = select(users).where( + ... and_( + ... users.c.id == a1.c.user_id, + ... users.c.id == a2.c.user_id, + ... a1.c.email_address == "jack@msn.com", + ... a2.c.email_address == "jack@yahoo.com", + ... ) + ... ) >>> conn.execute(s).fetchall() {opensql}SELECT users.id, users.name, users.fullname FROM users, addresses AS addresses_1, addresses AS addresses_2 @@ -1355,7 +1384,7 @@ itself, we don't need to be concerned about the generated name. However, for the purposes of debugging, it can be specified by passing a string name to the :meth:`_expression.FromClause.alias` method:: - >>> a1 = addresses.alias('a1') + >>> a1 = addresses.alias("a1") SELECT-oriented constructs which extend from :class:`_expression.SelectBase` may be turned into aliased subqueries using the :meth:`_expression.SelectBase.subquery` method, which @@ -1417,10 +1446,7 @@ username: .. sourcecode:: pycon+sql - >>> print(users.join(addresses, - ... addresses.c.email_address.like(users.c.name + '%') - ... ) - ... 
) + >>> print(users.join(addresses, addresses.c.email_address.like(users.c.name + "%"))) users JOIN addresses ON addresses.email_address LIKE users.name || :name_1 When we create a :func:`_expression.select` construct, SQLAlchemy looks around at the @@ -1431,9 +1457,8 @@ here we make use of the :meth:`_expression.Select.select_from` method: .. sourcecode:: pycon+sql >>> s = select(users.c.fullname).select_from( - ... users.join(addresses, - ... addresses.c.email_address.like(users.c.name + '%')) - ... ) + ... users.join(addresses, addresses.c.email_address.like(users.c.name + "%")) + ... ) {sql}>>> conn.execute(s).fetchall() SELECT users.fullname FROM users JOIN addresses ON addresses.email_address LIKE users.name || ? @@ -1486,8 +1511,12 @@ typically acquires using the :meth:`_expression.Select.cte` method on a .. sourcecode:: pycon+sql - >>> users_cte = select(users.c.id, users.c.name).where(users.c.name == 'wendy').cte() - >>> stmt = select(addresses).where(addresses.c.user_id == users_cte.c.id).order_by(addresses.c.id) + >>> users_cte = select(users.c.id, users.c.name).where(users.c.name == "wendy").cte() + >>> stmt = ( + ... select(addresses) + ... .where(addresses.c.user_id == users_cte.c.id) + ... .order_by(addresses.c.id) + ... ) >>> conn.execute(stmt).fetchall() {opensql}WITH anon_1 AS (SELECT users.id AS id, users.name AS name @@ -1523,8 +1552,14 @@ this form looks like: >>> users_cte = select(users.c.id, users.c.name).cte(recursive=True) >>> users_recursive = users_cte.alias() - >>> users_cte = users_cte.union(select(users.c.id, users.c.name).where(users.c.id > users_recursive.c.id)) - >>> stmt = select(addresses).where(addresses.c.user_id == users_cte.c.id).order_by(addresses.c.id) + >>> users_cte = users_cte.union( + ... select(users.c.id, users.c.name).where(users.c.id > users_recursive.c.id) + ... ) + >>> stmt = ( + ... select(addresses) + ... .where(addresses.c.user_id == users_cte.c.id) + ... .order_by(addresses.c.id) + ... 
) >>> conn.execute(stmt).fetchall() {opensql}WITH RECURSIVE anon_1(id, name) AS (SELECT users.id AS id, users.name AS name @@ -1562,7 +1597,7 @@ at execution time, as here where it converts to positional for SQLite: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import bindparam - >>> s = users.select().where(users.c.name == bindparam('username')) + >>> s = users.select().where(users.c.name == bindparam("username")) {sql}>>> conn.execute(s, {"username": "wendy"}).fetchall() SELECT users.id, users.name, users.fullname FROM users @@ -1577,7 +1612,9 @@ off to the database: .. sourcecode:: pycon+sql - >>> s = users.select().where(users.c.name.like(bindparam('username', type_=String) + text("'%'"))) + >>> s = users.select().where( + ... users.c.name.like(bindparam("username", type_=String) + text("'%'")) + ... ) {sql}>>> conn.execute(s, {"username": "wendy"}).fetchall() SELECT users.id, users.name, users.fullname FROM users @@ -1591,17 +1628,19 @@ single named value is needed in the execute parameters: .. sourcecode:: pycon+sql - >>> s = select(users, addresses).\ - ... where( - ... or_( - ... users.c.name.like( - ... bindparam('name', type_=String) + text("'%'")), - ... addresses.c.email_address.like( - ... bindparam('name', type_=String) + text("'@%'")) - ... ) - ... ).\ - ... select_from(users.outerjoin(addresses)).\ - ... order_by(addresses.c.id) + >>> s = ( + ... select(users, addresses) + ... .where( + ... or_( + ... users.c.name.like(bindparam("name", type_=String) + text("'%'")), + ... addresses.c.email_address.like( + ... bindparam("name", type_=String) + text("'@%'") + ... ), + ... ) + ... ) + ... .select_from(users.outerjoin(addresses)) + ... .order_by(addresses.c.id) + ... 
) {sql}>>> conn.execute(s, {"name": "jack"}).fetchall() SELECT users.id, users.name, users.fullname, addresses.id AS id_1, addresses.user_id, addresses.email_address @@ -1629,7 +1668,7 @@ generates functions using attribute access: >>> print(func.now()) now() - >>> print(func.concat('x', 'y')) + >>> print(func.concat("x", "y")) concat(:concat_1, :concat_2) By "generates", we mean that **any** SQL function is created based on the word @@ -1657,7 +1696,6 @@ as date and numeric coercions, the type may need to be specified explicitly:: stmt = select(func.date(some_table.c.date_string, type_=Date)) - Functions are most typically used in the columns clause of a select statement, and can also be labeled as well as given a type. Labeling a function is recommended so that the result can be targeted in a result row based on a @@ -1670,11 +1708,8 @@ not important in this case: .. sourcecode:: pycon+sql >>> conn.execute( - ... select( - ... func.max(addresses.c.email_address, type_=String). - ... label('maxemail') - ... ) - ... ).scalar() + ... select(func.max(addresses.c.email_address, type_=String).label("maxemail")) + ... ).scalar() {opensql}SELECT max(addresses.email_address) AS maxemail FROM addresses [...] () @@ -1690,13 +1725,9 @@ well as bind parameters: .. sourcecode:: pycon+sql >>> from sqlalchemy.sql import column - >>> calculate = select(column('q'), column('z'), column('r')).\ - ... select_from( - ... func.calculate( - ... bindparam('x'), - ... bindparam('y') - ... ) - ... ) + >>> calculate = select(column("q"), column("z"), column("r")).select_from( + ... func.calculate(bindparam("x"), bindparam("y")) + ... ) >>> calc = calculate.alias() >>> print(select(users).where(users.c.id > calc.c.z)) SELECT users.id, users.name, users.fullname @@ -1712,10 +1743,9 @@ of our selectable: .. sourcecode:: pycon+sql - >>> calc1 = calculate.alias('c1').unique_params(x=17, y=45) - >>> calc2 = calculate.alias('c2').unique_params(x=5, y=12) - >>> s = select(users).\ - ... 
where(users.c.id.between(calc1.c.z, calc2.c.z)) + >>> calc1 = calculate.alias("c1").unique_params(x=17, y=45) + >>> calc2 = calculate.alias("c2").unique_params(x=5, y=12) + >>> s = select(users).where(users.c.id.between(calc1.c.z, calc2.c.z)) >>> print(s) SELECT users.id, users.name, users.fullname FROM users, @@ -1723,7 +1753,7 @@ of our selectable: (SELECT q, z, r FROM calculate(:x_2, :y_2)) AS c2 WHERE users.id BETWEEN c1.z AND c2.z - >>> s.compile().params # doctest: +SKIP + >>> s.compile().params # doctest: +SKIP {u'x_2': 5, u'y_2': 12, u'y_1': 45, u'x_1': 17} .. seealso:: @@ -1739,10 +1769,7 @@ Any :class:`.FunctionElement`, including functions generated by :data:`~.expression.func`, can be turned into a "window function", that is an OVER clause, using the :meth:`.FunctionElement.over` method:: - >>> s = select( - ... users.c.id, - ... func.row_number().over(order_by=users.c.name) - ... ) + >>> s = select(users.c.id, func.row_number().over(order_by=users.c.name)) >>> print(s) SELECT users.id, row_number() OVER (ORDER BY users.name) AS anon_1 FROM users @@ -1751,12 +1778,7 @@ OVER clause, using the :meth:`.FunctionElement.over` method:: either the :paramref:`.expression.over.rows` or :paramref:`.expression.over.range` parameters:: - >>> s = select( - ... users.c.id, - ... func.row_number().over( - ... order_by=users.c.name, - ... rows=(-2, None)) - ... ) + >>> s = select(users.c.id, func.row_number().over(order_by=users.c.name, rows=(-2, None))) >>> print(s) SELECT users.id, row_number() OVER (ORDER BY users.name ROWS BETWEEN :param_1 PRECEDING AND UNBOUNDED FOLLOWING) AS anon_1 @@ -1830,11 +1852,7 @@ string into one of MySQL's JSON functions: >>> from sqlalchemy import JSON >>> from sqlalchemy import type_coerce >>> from sqlalchemy.dialects import mysql - >>> s = select( - ... type_coerce( - ... {'some_key': {'foo': 'bar'}}, JSON - ... )['some_key'] - ... 
) + >>> s = select(type_coerce({"some_key": {"foo": "bar"}}, JSON)["some_key"]) >>> print(s.compile(dialect=mysql.dialect())) SELECT JSON_EXTRACT(%s, %s) AS anon_1 @@ -1856,10 +1874,8 @@ module level functions :func:`_expression.union` and >>> from sqlalchemy.sql import union >>> u = union( - ... addresses.select(). - ... where(addresses.c.email_address == 'foo@bar.com'), - ... addresses.select(). - ... where(addresses.c.email_address.like('%@yahoo.com')), + ... addresses.select().where(addresses.c.email_address == "foo@bar.com"), + ... addresses.select().where(addresses.c.email_address.like("%@yahoo.com")), ... ).order_by(addresses.c.email_address) {sql}>>> conn.execute(u).fetchall() @@ -1882,10 +1898,8 @@ Also available, though not supported on all databases, are >>> from sqlalchemy.sql import except_ >>> u = except_( - ... addresses.select(). - ... where(addresses.c.email_address.like('%@%.com')), - ... addresses.select(). - ... where(addresses.c.email_address.like('%@msn.com')) + ... addresses.select().where(addresses.c.email_address.like("%@%.com")), + ... addresses.select().where(addresses.c.email_address.like("%@msn.com")), ... ) {sql}>>> conn.execute(u).fetchall() @@ -1910,13 +1924,13 @@ want the "union" to be stated as a subquery: .. sourcecode:: pycon+sql >>> u = except_( - ... union( - ... addresses.select(). - ... where(addresses.c.email_address.like('%@yahoo.com')), - ... addresses.select(). - ... where(addresses.c.email_address.like('%@msn.com')) - ... ).subquery().select(), # apply subquery here - ... addresses.select().where(addresses.c.email_address.like('%@msn.com')) + ... union( + ... addresses.select().where(addresses.c.email_address.like("%@yahoo.com")), + ... addresses.select().where(addresses.c.email_address.like("%@msn.com")), + ... ) + ... .subquery() + ... .select(), # apply subquery here + ... addresses.select().where(addresses.c.email_address.like("%@msn.com")), ... 
) {sql}>>> conn.execute(u).fetchall() SELECT anon_1.id, anon_1.user_id, anon_1.email_address @@ -1966,10 +1980,8 @@ selected from the first SELECT; the SQLAlchemy compiler will ensure these will be rendered without table names:: >>> u = union( - ... addresses.select(). - ... where(addresses.c.email_address == 'foo@bar.com'), - ... addresses.select(). - ... where(addresses.c.email_address.like('%@yahoo.com')), + ... addresses.select().where(addresses.c.email_address == "foo@bar.com"), + ... addresses.select().where(addresses.c.email_address.like("%@yahoo.com")), ... ) >>> u = u.order_by(u.selected_columns.email_address) >>> print(u) @@ -1997,9 +2009,11 @@ or :meth:`_expression.SelectBase.label` method: .. sourcecode:: pycon+sql - >>> subq = select(func.count(addresses.c.id)).\ - ... where(users.c.id == addresses.c.user_id).\ - ... scalar_subquery() + >>> subq = ( + ... select(func.count(addresses.c.id)) + ... .where(users.c.id == addresses.c.user_id) + ... .scalar_subquery() + ... ) The above construct is now a :class:`_expression.ScalarSelect` object, which is an adapter around the original :class:`.~expression.Select` @@ -2022,9 +2036,11 @@ it using :meth:`_expression.SelectBase.label` instead: .. sourcecode:: pycon+sql - >>> subq = select(func.count(addresses.c.id)).\ - ... where(users.c.id == addresses.c.user_id).\ - ... label("address_count") + >>> subq = ( + ... select(func.count(addresses.c.id)) + ... .where(users.c.id == addresses.c.user_id) + ... .label("address_count") + ... ) >>> conn.execute(select(users.c.name, subq)).fetchall() {opensql}SELECT users.name, (SELECT count(addresses.id) AS count_1 FROM addresses @@ -2052,11 +2068,12 @@ still have at least one FROM clause of its own. For example: .. sourcecode:: pycon+sql - >>> stmt = select(addresses.c.user_id).\ - ... where(addresses.c.user_id == users.c.id).\ - ... where(addresses.c.email_address == 'jack@yahoo.com') - >>> enclosing_stmt = select(users.c.name).\ - ... 
where(users.c.id == stmt.scalar_subquery()) + >>> stmt = ( + ... select(addresses.c.user_id) + ... .where(addresses.c.user_id == users.c.id) + ... .where(addresses.c.email_address == "jack@yahoo.com") + ... ) + >>> enclosing_stmt = select(users.c.name).where(users.c.id == stmt.scalar_subquery()) >>> conn.execute(enclosing_stmt).fetchall() {opensql}SELECT users.name FROM users @@ -2075,14 +2092,17 @@ may be correlated: .. sourcecode:: pycon+sql - >>> stmt = select(users.c.id).\ - ... where(users.c.id == addresses.c.user_id).\ - ... where(users.c.name == 'jack').\ - ... correlate(addresses) - >>> enclosing_stmt = select( - ... users.c.name, addresses.c.email_address).\ - ... select_from(users.join(addresses)).\ - ... where(users.c.id == stmt.scalar_subquery()) + >>> stmt = ( + ... select(users.c.id) + ... .where(users.c.id == addresses.c.user_id) + ... .where(users.c.name == "jack") + ... .correlate(addresses) + ... ) + >>> enclosing_stmt = ( + ... select(users.c.name, addresses.c.email_address) + ... .select_from(users.join(addresses)) + ... .where(users.c.id == stmt.scalar_subquery()) + ... ) >>> conn.execute(enclosing_stmt).fetchall() {opensql}SELECT users.name, addresses.email_address FROM users JOIN addresses ON users.id = addresses.user_id @@ -2097,11 +2117,8 @@ as the argument: .. sourcecode:: pycon+sql - >>> stmt = select(users.c.id).\ - ... where(users.c.name == 'wendy').\ - ... correlate(None) - >>> enclosing_stmt = select(users.c.name).\ - ... where(users.c.id == stmt.scalar_subquery()) + >>> stmt = select(users.c.id).where(users.c.name == "wendy").correlate(None) + >>> enclosing_stmt = select(users.c.name).where(users.c.id == stmt.scalar_subquery()) >>> conn.execute(enclosing_stmt).fetchall() {opensql}SELECT users.name FROM users @@ -2117,14 +2134,17 @@ by telling it to correlate all FROM clauses except for ``users``: .. sourcecode:: pycon+sql - >>> stmt = select(users.c.id).\ - ... where(users.c.id == addresses.c.user_id).\ - ... 
where(users.c.name == 'jack').\ - ... correlate_except(users) - >>> enclosing_stmt = select( - ... users.c.name, addresses.c.email_address).\ - ... select_from(users.join(addresses)).\ - ... where(users.c.id == stmt.scalar_subquery()) + >>> stmt = ( + ... select(users.c.id) + ... .where(users.c.id == addresses.c.user_id) + ... .where(users.c.name == "jack") + ... .correlate_except(users) + ... ) + >>> enclosing_stmt = ( + ... select(users.c.name, addresses.c.email_address) + ... .select_from(users.join(addresses)) + ... .where(users.c.id == stmt.scalar_subquery()) + ... ) >>> conn.execute(enclosing_stmt).fetchall() {opensql}SELECT users.name, addresses.email_address FROM users JOIN addresses ON users.id = addresses.user_id @@ -2165,10 +2185,13 @@ to the left side of the JOIN. SQLAlchemy Core supports a statement like the above using the :meth:`_expression.Select.lateral` method as follows:: >>> from sqlalchemy import table, column, select, true - >>> people = table('people', column('people_id'), column('age'), column('name')) - >>> books = table('books', column('book_id'), column('owner_id')) - >>> subq = select(books.c.book_id).\ - ... where(books.c.owner_id == people.c.people_id).lateral("book_subq") + >>> people = table("people", column("people_id"), column("age"), column("name")) + >>> books = table("books", column("book_id"), column("owner_id")) + >>> subq = ( + ... select(books.c.book_id) + ... .where(books.c.owner_id == people.c.people_id) + ... .lateral("book_subq") + ... ) >>> print(select(people).select_from(people.join(subq, true()))) SELECT people.people_id, people.age, people.name FROM people JOIN LATERAL (SELECT books.book_id AS book_id @@ -2237,9 +2260,11 @@ This is provided via the :meth:`_expression.SelectBase.group_by` method: .. sourcecode:: pycon+sql - >>> stmt = select(users.c.name, func.count(addresses.c.id)).\ - ... select_from(users.join(addresses)).\ - ... group_by(users.c.name) + >>> stmt = ( + ... 
select(users.c.name, func.count(addresses.c.id)) + ... .select_from(users.join(addresses)) + ... .group_by(users.c.name) + ... ) >>> conn.execute(stmt).fetchall() {opensql}SELECT users.name, count(addresses.id) AS count_1 FROM users JOIN addresses @@ -2257,10 +2282,12 @@ method: .. sourcecode:: pycon+sql - >>> stmt = select(users.c.name, func.count(addresses.c.id)).\ - ... select_from(users.join(addresses)).\ - ... group_by(users.c.name).\ - ... having(func.length(users.c.name) > 4) + >>> stmt = ( + ... select(users.c.name, func.count(addresses.c.id)) + ... .select_from(users.join(addresses)) + ... .group_by(users.c.name) + ... .having(func.length(users.c.name) > 4) + ... ) >>> conn.execute(stmt).fetchall() {opensql}SELECT users.name, count(addresses.id) AS count_1 FROM users JOIN addresses @@ -2276,10 +2303,11 @@ is the DISTINCT modifier. A simple DISTINCT clause can be added using the .. sourcecode:: pycon+sql - >>> stmt = select(users.c.name).\ - ... where(addresses.c.email_address. - ... contains(users.c.name)).\ - ... distinct() + >>> stmt = ( + ... select(users.c.name) + ... .where(addresses.c.email_address.contains(users.c.name)) + ... .distinct() + ... ) >>> conn.execute(stmt).fetchall() {opensql}SELECT DISTINCT users.name FROM users, addresses @@ -2298,9 +2326,12 @@ into the current backend's methodology: .. sourcecode:: pycon+sql - >>> stmt = select(users.c.name, addresses.c.email_address).\ - ... select_from(users.join(addresses)).\ - ... limit(1).offset(1) + >>> stmt = ( + ... select(users.c.name, addresses.c.email_address) + ... .select_from(users.join(addresses)) + ... .limit(1) + ... .offset(1) + ... ) >>> conn.execute(stmt).fetchall() {opensql}SELECT users.name, addresses.email_address FROM users JOIN addresses ON users.id = addresses.user_id @@ -2326,8 +2357,7 @@ as a value: .. sourcecode:: pycon+sql - >>> stmt = users.update().\ - ... 
values(fullname="Fullname: " + users.c.name) + >>> stmt = users.update().values(fullname="Fullname: " + users.c.name) >>> conn.execute(stmt) {opensql}UPDATE users SET fullname=(? || users.name) [...] ('Fullname: ',) @@ -2351,13 +2381,15 @@ as in the example below: .. sourcecode:: pycon+sql - >>> stmt = users.insert().\ - ... values(name=bindparam('_name') + " .. name") - >>> conn.execute(stmt, [ - ... {'id':4, '_name':'name1'}, - ... {'id':5, '_name':'name2'}, - ... {'id':6, '_name':'name3'}, - ... ]) + >>> stmt = users.insert().values(name=bindparam("_name") + " .. name") + >>> conn.execute( + ... stmt, + ... [ + ... {"id": 4, "_name": "name1"}, + ... {"id": 5, "_name": "name2"}, + ... {"id": 6, "_name": "name3"}, + ... ], + ... ) {opensql}INSERT INTO users (id, name) VALUES (?, (? || ?)) [...] ((4, 'name1', ' .. name'), (5, 'name2', ' .. name'), (6, 'name3', ' .. name')) COMMIT @@ -2369,9 +2401,7 @@ that can be specified: .. sourcecode:: pycon+sql - >>> stmt = users.update().\ - ... where(users.c.name == 'jack').\ - ... values(name='ed') + >>> stmt = users.update().where(users.c.name == "jack").values(name="ed") >>> conn.execute(stmt) {opensql}UPDATE users SET name=? WHERE users.name = ? @@ -2386,14 +2416,19 @@ used to achieve this: .. sourcecode:: pycon+sql - >>> stmt = users.update().\ - ... where(users.c.name == bindparam('oldname')).\ - ... values(name=bindparam('newname')) - >>> conn.execute(stmt, [ - ... {'oldname':'jack', 'newname':'ed'}, - ... {'oldname':'wendy', 'newname':'mary'}, - ... {'oldname':'jim', 'newname':'jake'}, - ... ]) + >>> stmt = ( + ... users.update() + ... .where(users.c.name == bindparam("oldname")) + ... .values(name=bindparam("newname")) + ... ) + >>> conn.execute( + ... stmt, + ... [ + ... {"oldname": "jack", "newname": "ed"}, + ... {"oldname": "wendy", "newname": "mary"}, + ... {"oldname": "jim", "newname": "jake"}, + ... ], + ... ) {opensql}UPDATE users SET name=? WHERE users.name = ? [...] 
(('ed', 'jack'), ('mary', 'wendy'), ('jake', 'jim')) COMMIT @@ -2410,9 +2445,9 @@ subquery using :meth:`_expression.Select.scalar_subquery`: .. sourcecode:: pycon+sql - >>> stmt = select(addresses.c.email_address).\ - ... where(addresses.c.user_id == users.c.id).\ - ... limit(1) + >>> stmt = ( + ... select(addresses.c.email_address).where(addresses.c.user_id == users.c.id).limit(1) + ... ) >>> conn.execute(users.update().values(fullname=stmt.scalar_subquery())) {opensql}UPDATE users SET fullname=(SELECT addresses.email_address FROM addresses @@ -2435,10 +2470,12 @@ multiple tables can be embedded into a single UPDATE statement separated by a co The SQLAlchemy :func:`_expression.update` construct supports both of these modes implicitly, by specifying multiple tables in the WHERE clause:: - stmt = users.update().\ - values(name='ed wood').\ - where(users.c.id == addresses.c.id).\ - where(addresses.c.email_address.startswith('ed%')) + stmt = ( + users.update() + .values(name="ed wood") + .where(users.c.id == addresses.c.id) + .where(addresses.c.email_address.startswith("ed%")) + ) conn.execute(stmt) The resulting SQL from the above statement would render as:: @@ -2450,13 +2487,12 @@ The resulting SQL from the above statement would render as:: When using MySQL, columns from each table can be assigned to in the SET clause directly, using the dictionary form passed to :meth:`_expression.Update.values`:: - stmt = users.update().\ - values({ - users.c.name:'ed wood', - addresses.c.email_address:'ed.wood@foo.com' - }).\ - where(users.c.id == addresses.c.id).\ - where(addresses.c.email_address.startswith('ed%')) + stmt = ( + users.update() + .values({users.c.name: "ed wood", addresses.c.email_address: "ed.wood@foo.com"}) + .where(users.c.id == addresses.c.id) + .where(addresses.c.email_address.startswith("ed%")) + ) The tables are referenced explicitly in the SET clause:: @@ -2506,8 +2542,9 @@ To suit this specific use case, the we supply a **series of 2-tuples** as the 
argument to the method:: - stmt = some_table.update().\ - ordered_values((some_table.c.y, 20), (some_table.c.x, some_table.c.y + 10)) + stmt = some_table.update().ordered_values( + (some_table.c.y, 20), (some_table.c.x, some_table.c.y + 10) + ) The series of 2-tuples is essentially the same structure as a Python dictionary, except that it explicitly suggests a specific ordering. Using the @@ -2539,7 +2576,7 @@ Finally, a delete. This is accomplished easily enough using the COMMIT {stop} - >>> conn.execute(users.delete().where(users.c.name > 'm')) + >>> conn.execute(users.delete().where(users.c.name > "m")) {opensql}DELETE FROM users WHERE users.name > ? [...] ('m',) COMMIT @@ -2559,9 +2596,11 @@ and MySQL, this is the "DELETE USING" syntax, and for SQL Server, it's a :func:`_expression.delete` construct supports both of these modes implicitly, by specifying multiple tables in the WHERE clause:: - stmt = users.delete().\ - where(users.c.id == addresses.c.id).\ - where(addresses.c.email_address.startswith('ed%')) + stmt = ( + users.delete() + .where(users.c.id == addresses.c.id) + .where(addresses.c.email_address.startswith("ed%")) + ) conn.execute(stmt) On a PostgreSQL backend, the resulting SQL from the above statement would render as:: diff --git a/doc/build/core/type_basics.rst b/doc/build/core/type_basics.rst index 49fc715f06f..62d941e6639 100644 --- a/doc/build/core/type_basics.rst +++ b/doc/build/core/type_basics.rst @@ -43,10 +43,10 @@ values to and from the database, as in the example below:: metadata_obj = MetaData() user = Table( - 'user', + "user", metadata_obj, - Column('user_name', String, primary_key=True), - Column('email_address', String(60)), + Column("user_name", String, primary_key=True), + Column("email_address", String(60)), ) When using a particular :class:`_types.TypeEngine` class in a diff --git a/doc/build/dialects/mssql.rst b/doc/build/dialects/mssql.rst index 7484000dbcc..6fd573c8d3d 100644 --- a/doc/build/dialects/mssql.rst +++ 
b/doc/build/dialects/mssql.rst @@ -19,12 +19,38 @@ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with SQL server are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: - from sqlalchemy.dialects.mssql import \ - BIGINT, BINARY, BIT, CHAR, DATE, DATETIME, DATETIME2, \ - DATETIMEOFFSET, DECIMAL, FLOAT, IMAGE, INTEGER, JSON, MONEY, \ - NCHAR, NTEXT, NUMERIC, NVARCHAR, REAL, SMALLDATETIME, \ - SMALLINT, SMALLMONEY, SQL_VARIANT, TEXT, TIME, \ - TIMESTAMP, TINYINT, UNIQUEIDENTIFIER, VARBINARY, VARCHAR + from sqlalchemy.dialects.mssql import ( + BIGINT, + BINARY, + BIT, + CHAR, + DATE, + DATETIME, + DATETIME2, + DATETIMEOFFSET, + DECIMAL, + FLOAT, + IMAGE, + INTEGER, + JSON, + MONEY, + NCHAR, + NTEXT, + NUMERIC, + NVARCHAR, + REAL, + SMALLDATETIME, + SMALLINT, + SMALLMONEY, + SQL_VARIANT, + TEXT, + TIME, + TIMESTAMP, + TINYINT, + UNIQUEIDENTIFIER, + VARBINARY, + VARCHAR, + ) Types which are specific to SQL Server, or have SQL Server-specific construction arguments, are as follows: diff --git a/doc/build/dialects/mysql.rst b/doc/build/dialects/mysql.rst index c506a5fa43b..52dd45cfac2 100644 --- a/doc/build/dialects/mysql.rst +++ b/doc/build/dialects/mysql.rst @@ -19,12 +19,42 @@ MySQL Data Types As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with MySQL are importable from the top level dialect:: - from sqlalchemy.dialects.mysql import \ - BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, \ - DATETIME, DECIMAL, DECIMAL, DOUBLE, ENUM, FLOAT, INTEGER, \ - LONGBLOB, LONGTEXT, MEDIUMBLOB, MEDIUMINT, MEDIUMTEXT, NCHAR, \ - NUMERIC, NVARCHAR, REAL, SET, SMALLINT, TEXT, TIME, TIMESTAMP, \ - TINYBLOB, TINYINT, TINYTEXT, VARBINARY, VARCHAR, YEAR + from sqlalchemy.dialects.mysql import ( + BIGINT, + BINARY, + BIT, + BLOB, + BOOLEAN, + CHAR, + DATE, + DATETIME, + DECIMAL, + DECIMAL, + DOUBLE, + ENUM, + FLOAT, + INTEGER, + LONGBLOB, + LONGTEXT, + 
MEDIUMBLOB, + MEDIUMINT, + MEDIUMTEXT, + NCHAR, + NUMERIC, + NVARCHAR, + REAL, + SET, + SMALLINT, + TEXT, + TIME, + TIMESTAMP, + TINYBLOB, + TINYINT, + TINYTEXT, + VARBINARY, + VARCHAR, + YEAR, + ) Types which are specific to MySQL, or have MySQL-specific construction arguments, are as follows: diff --git a/doc/build/dialects/oracle.rst b/doc/build/dialects/oracle.rst index 81cef78d272..d992a2f83b0 100644 --- a/doc/build/dialects/oracle.rst +++ b/doc/build/dialects/oracle.rst @@ -12,11 +12,26 @@ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with Oracle are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: - from sqlalchemy.dialects.oracle import \ - BFILE, BLOB, CHAR, CLOB, DATE, \ - DOUBLE_PRECISION, FLOAT, INTERVAL, LONG, NCLOB, NCHAR, \ - NUMBER, NVARCHAR, NVARCHAR2, RAW, TIMESTAMP, VARCHAR, \ - VARCHAR2 + from sqlalchemy.dialects.oracle import ( + BFILE, + BLOB, + CHAR, + CLOB, + DATE, + DOUBLE_PRECISION, + FLOAT, + INTERVAL, + LONG, + NCLOB, + NCHAR, + NUMBER, + NVARCHAR, + NVARCHAR2, + RAW, + TIMESTAMP, + VARCHAR, + VARCHAR2, + ) .. versionadded:: 1.2.19 Added :class:`_types.NCHAR` to the list of datatypes exported by the Oracle dialect. 
diff --git a/doc/build/dialects/postgresql.rst b/doc/build/dialects/postgresql.rst index c58aaee9b48..4e8fb98d95e 100644 --- a/doc/build/dialects/postgresql.rst +++ b/doc/build/dialects/postgresql.rst @@ -12,12 +12,43 @@ As with all SQLAlchemy dialects, all UPPERCASE types that are known to be valid with PostgreSQL are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: - from sqlalchemy.dialects.postgresql import \ - ARRAY, BIGINT, BIT, BOOLEAN, BYTEA, CHAR, CIDR, DATE, \ - DOUBLE_PRECISION, ENUM, FLOAT, HSTORE, INET, INTEGER, \ - INTERVAL, JSON, JSONB, MACADDR, MONEY, NUMERIC, OID, REAL, SMALLINT, TEXT, \ - TIME, TIMESTAMP, UUID, VARCHAR, INT4RANGE, INT8RANGE, NUMRANGE, \ - DATERANGE, TSRANGE, TSTZRANGE, TSVECTOR + from sqlalchemy.dialects.postgresql import ( + ARRAY, + BIGINT, + BIT, + BOOLEAN, + BYTEA, + CHAR, + CIDR, + DATE, + DOUBLE_PRECISION, + ENUM, + FLOAT, + HSTORE, + INET, + INTEGER, + INTERVAL, + JSON, + JSONB, + MACADDR, + MONEY, + NUMERIC, + OID, + REAL, + SMALLINT, + TEXT, + TIME, + TIMESTAMP, + UUID, + VARCHAR, + INT4RANGE, + INT8RANGE, + NUMRANGE, + DATERANGE, + TSRANGE, + TSTZRANGE, + TSVECTOR, + ) Types which are specific to PostgreSQL, or have PostgreSQL-specific construction arguments, are as follows: @@ -179,16 +210,15 @@ For example:: from sqlalchemy.dialects.postgresql import ExcludeConstraint, TSRANGE + class RoomBooking(Base): - __tablename__ = 'room_booking' + __tablename__ = "room_booking" room = Column(Integer(), primary_key=True) during = Column(TSRANGE()) - __table_args__ = ( - ExcludeConstraint(('room', '='), ('during', '&&')), - ) + __table_args__ = (ExcludeConstraint(("room", "="), ("during", "&&")),) PostgreSQL DML Constructs ------------------------- diff --git a/doc/build/dialects/sqlite.rst b/doc/build/dialects/sqlite.rst index 6d40daf5fe2..d25301fa53f 100644 --- a/doc/build/dialects/sqlite.rst +++ b/doc/build/dialects/sqlite.rst @@ -12,10 +12,23 @@ As with all 
SQLAlchemy dialects, all UPPERCASE types that are known to be valid with SQLite are importable from the top level dialect, whether they originate from :mod:`sqlalchemy.types` or from the local dialect:: - from sqlalchemy.dialects.sqlite import \ - BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, FLOAT, \ - INTEGER, NUMERIC, JSON, SMALLINT, TEXT, TIME, TIMESTAMP, \ - VARCHAR + from sqlalchemy.dialects.sqlite import ( + BLOB, + BOOLEAN, + CHAR, + DATE, + DATETIME, + DECIMAL, + FLOAT, + INTEGER, + NUMERIC, + JSON, + SMALLINT, + TEXT, + TIME, + TIMESTAMP, + VARCHAR, + ) .. module:: sqlalchemy.dialects.sqlite diff --git a/doc/build/errors.rst b/doc/build/errors.rst index 3c0632af692..f270ee3202b 100644 --- a/doc/build/errors.rst +++ b/doc/build/errors.rst @@ -441,7 +441,7 @@ Normally, a Core SQL construct or ORM :class:`_query.Query` object can be string directly, such as when we use ``print()``:: >>> from sqlalchemy import column - >>> print(column('x') == 5) + >>> print(column("x") == 5) x = :x_1 When the above SQL expression is stringified, the :class:`.StrSQLCompiler` @@ -455,11 +455,9 @@ to turn into a string, such as the PostgreSQL >>> from sqlalchemy.dialects.postgresql import insert >>> from sqlalchemy import table, column - >>> my_table = table('my_table', column('x'), column('y')) - >>> insert_stmt = insert(my_table).values(x='foo') - >>> insert_stmt = insert_stmt.on_conflict_do_nothing( - ... index_elements=['y'] - ... 
) + >>> my_table = table("my_table", column("x"), column("y")) + >>> insert_stmt = insert(my_table).values(x="foo") + >>> insert_stmt = insert_stmt.on_conflict_do_nothing(index_elements=["y"]) >>> print(insert_stmt) Traceback (most recent call last): @@ -501,14 +499,12 @@ This often occurs when attempting to use a :func:`.column_property` or declarative such as:: class Bar(Base): - __tablename__ = 'bar' + __tablename__ = "bar" id = Column(Integer, primary_key=True) cprop = deferred(Column(Integer)) - __table_args__ = ( - CheckConstraint(cprop > 5), - ) + __table_args__ = (CheckConstraint(cprop > 5),) Above, the ``cprop`` attribute is used inline before it has been mapped, however this ``cprop`` attribute is not a :class:`_schema.Column`, @@ -527,16 +523,12 @@ The solution is to access the :class:`_schema.Column` directly using the :attr:`.ColumnProperty.expression` attribute:: class Bar(Base): - __tablename__ = 'bar' + __tablename__ = "bar" id = Column(Integer, primary_key=True) cprop = deferred(Column(Integer)) - __table_args__ = ( - CheckConstraint(cprop.expression > 5), - ) - - + __table_args__ = (CheckConstraint(cprop.expression > 5),) .. _error_cd3x: @@ -547,7 +539,7 @@ This error occurs when a statement makes use of :func:`.bindparam` either implicitly or explicitly and does not provide a value when the statement is executed:: - stmt = select(table.c.column).where(table.c.id == bindparam('my_param')) + stmt = select(table.c.column).where(table.c.id == bindparam("my_param")) result = conn.execute(stmt) @@ -594,11 +586,12 @@ this error is generated:: Since "b" is required, pass it as ``None`` so that the INSERT may proceed:: e.execute( - t.insert(), [ + t.insert(), + [ {"a": 1, "b": 2, "c": 3}, {"a": 2, "b": None, "c": 4}, {"a": 3, "b": 4, "c": 5}, - ] + ], ) .. seealso:: @@ -620,12 +613,7 @@ Core and the full rationale is discussed at :ref:`change_4617`. 
Given an example as:: m = MetaData() - t = Table( - 't', m, - Column('a', Integer), - Column('b', Integer), - Column('c', Integer) - ) + t = Table("t", m, Column("a", Integer), Column("b", Integer), Column("c", Integer)) stmt = select(t) Above, ``stmt`` represents a SELECT statement. The error is produced when we want @@ -678,10 +666,12 @@ construct:: a1 = Address.__table__ - q = s.query(User).\ - join(a1, User.addresses).\ - filter(Address.email_address == 'ed@foo.com').all() - + q = ( + s.query(User) + .join(a1, User.addresses) + .filter(Address.email_address == "ed@foo.com") + .all() + ) The above pattern also allows an arbitrary selectable, such as a Core :class:`_sql.Join` or :class:`_sql.Alias` object, @@ -690,23 +680,26 @@ Core element would need to be referred towards directly:: a1 = Address.__table__.alias() - q = s.query(User).\ - join(a1, User.addresses).\ - filter(a1.c.email_address == 'ed@foo.com').all() + q = ( + s.query(User) + .join(a1, User.addresses) + .filter(a1.c.email_address == "ed@foo.com") + .all() + ) The correct way to specify a join target is always by using the mapped class itself or an :class:`_orm.aliased` object, in the latter case using the :meth:`_orm.PropComparator.of_type` modifier to set up an alias:: # normal join to relationship entity - q = s.query(User).\ - join(User.addresses).\ - filter(Address.email_address == 'ed@foo.com') + q = s.query(User).join(User.addresses).filter(Address.email_address == "ed@foo.com") # name Address target explicitly, not necessary but legal - q = s.query(User).\ - join(Address, User.addresses).\ - filter(Address.email_address == 'ed@foo.com') + q = ( + s.query(User) + .join(Address, User.addresses) + .filter(Address.email_address == "ed@foo.com") + ) Join to an alias:: @@ -715,15 +708,14 @@ Join to an alias:: a1 = aliased(Address) # of_type() form; recommended - q = s.query(User).\ - join(User.addresses.of_type(a1)).\ - filter(a1.email_address == 'ed@foo.com') + q = ( + s.query(User) + 
.join(User.addresses.of_type(a1)) + .filter(a1.email_address == "ed@foo.com") + ) # target, onclause form - q = s.query(User).\ - join(a1, User.addresses).\ - filter(a1.email_address == 'ed@foo.com') - + q = s.query(User).join(a1, User.addresses).filter(a1.email_address == "ed@foo.com") .. _error_xaj2: @@ -741,7 +733,7 @@ alias to one side or the other; SQLAlchemy applies an alias to the right side of the join. For example given a joined inheritance mapping as:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) manager_id = Column(ForeignKey("manager.id")) name = Column(String(50)) @@ -750,17 +742,18 @@ of the join. For example given a joined inheritance mapping as:: reports_to = relationship("Manager", foreign_keys=manager_id) __mapper_args__ = { - 'polymorphic_identity':'employee', - 'polymorphic_on':type, + "polymorphic_identity": "employee", + "polymorphic_on": type, } + class Manager(Employee): - __tablename__ = 'manager' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "manager" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) __mapper_args__ = { - 'polymorphic_identity':'manager', - 'inherit_condition': id == Employee.id + "polymorphic_identity": "manager", + "inherit_condition": id == Employee.id, } The above mapping includes a relationship between the ``Employee`` and @@ -824,10 +817,10 @@ embedding the join into a new subquery: If we then wanted to use :func:`_orm.contains_eager` to populate the ``reports_to`` attribute, we refer to the alias:: - >>> stmt =select(Employee).join( - ... Employee.reports_to.of_type(manager_alias) - ... ).options( - ... contains_eager(Employee.reports_to.of_type(manager_alias)) + >>> stmt = ( + ... select(Employee) + ... .join(Employee.reports_to.of_type(manager_alias)) + ... .options(contains_eager(Employee.reports_to.of_type(manager_alias))) ... 
) Without using the explicit :func:`_orm.aliased` object, in some more nested @@ -960,6 +953,7 @@ is set on a many-to-one or many-to-many relationship, such as:: # configuration step occurs a = relationship("A", back_populates="bs", cascade="all, delete-orphan") + configure_mappers() Above, the "delete-orphan" setting on ``B.a`` indicates the intent that @@ -1222,12 +1216,12 @@ items in each case:: "Child", primaryjoin="and_(Parent.id == Child.parent_id, Child.flag == 0)", backref="parent", - overlaps="c2, parent" + overlaps="c2, parent", ) c2 = relationship( "Child", primaryjoin="and_(Parent.id == Child.parent_id, Child.flag == 1)", - overlaps="c1, parent" + overlaps="c1, parent", ) @@ -1238,7 +1232,6 @@ items in each case:: flag = Column(Integer) - Above, the ORM will know that the overlap between ``Parent.c1``, ``Parent.c2`` and ``Child.parent`` is intentional. @@ -1289,8 +1282,7 @@ the ``prebuffer_rows`` execution option may be used as follows:: # result internally pre-fetches all objects result = sess.execute( - select(User).where(User.id == 7), - execution_options={"prebuffer_rows": True} + select(User).where(User.id == 7), execution_options={"prebuffer_rows": True} ) # context manager is closed, so session_obj above is closed, identity @@ -1577,10 +1569,10 @@ the :meth:`.Executable.execute` method directly off of a Core expression object that is not associated with any :class:`_engine.Engine`:: metadata_obj = MetaData() - table = Table('t', metadata_obj, Column('q', Integer)) + table = Table("t", metadata_obj, Column("q", Integer)) stmt = select(table) - result = stmt.execute() # <--- raises + result = stmt.execute() # <--- raises What the logic is expecting is that the :class:`_schema.MetaData` object has been **bound** to a :class:`_engine.Engine`:: @@ -1597,7 +1589,7 @@ The correct way to invoke statements is via the :meth:`_engine.Connection.execute` method of a :class:`_engine.Connection`:: with engine.connect() as conn: - result = conn.execute(stmt) + 
result = conn.execute(stmt) When using the ORM, a similar facility is available via the :class:`.Session`:: diff --git a/doc/build/faq/connections.rst b/doc/build/faq/connections.rst index 27ba5f4ed5c..fe8e56f815d 100644 --- a/doc/build/faq/connections.rst +++ b/doc/build/faq/connections.rst @@ -27,8 +27,9 @@ How do I pass custom connect arguments to my database API? The :func:`_sa.create_engine` call accepts additional arguments either directly via the ``connect_args`` keyword argument:: - e = create_engine("mysql://scott:tiger@localhost/test", - connect_args={"encoding": "utf8"}) + e = create_engine( + "mysql://scott:tiger@localhost/test", connect_args={"encoding": "utf8"} + ) Or for basic string and integer arguments, they can usually be specified in the query string of the URL:: @@ -256,9 +257,7 @@ statement executions:: fn(cursor_obj, statement, context=context, *arg) except engine.dialect.dbapi.Error as raw_dbapi_err: connection = context.root_connection - if engine.dialect.is_disconnect( - raw_dbapi_err, connection, cursor_obj - ): + if engine.dialect.is_disconnect(raw_dbapi_err, connection, cursor_obj): if retry > num_retries: raise engine.logger.error( @@ -316,9 +315,7 @@ using the following proof of concept script. Once run, it will emit a time.sleep(5) e = reconnecting_engine( - create_engine( - "mysql://scott:tiger@localhost/test", echo_pool=True - ), + create_engine("mysql://scott:tiger@localhost/test", echo_pool=True), num_retries=5, retry_interval=2, ) @@ -374,7 +371,10 @@ configured using ``reset_on_return``:: from sqlalchemy import create_engine from sqlalchemy.pool import QueuePool - engine = create_engine('mysql://scott:tiger@localhost/myisam_database', pool=QueuePool(reset_on_return=False)) + engine = create_engine( + "mysql://scott:tiger@localhost/myisam_database", + pool=QueuePool(reset_on_return=False), + ) I'm on SQL Server - how do I turn those ROLLBACKs into COMMITs? 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -383,8 +383,9 @@ I'm on SQL Server - how do I turn those ROLLBACKs into COMMITs? to ``True``, ``False``, and ``None``. Setting to ``commit`` will cause a COMMIT as any connection is returned to the pool:: - engine = create_engine('mssql://scott:tiger@mydsn', pool=QueuePool(reset_on_return='commit')) - + engine = create_engine( + "mssql://scott:tiger@mydsn", pool=QueuePool(reset_on_return="commit") + ) I am using multiple connections with a SQLite database (typically to test transaction operation), and my test program is not working! ---------------------------------------------------------------------------------------------------------------------------------------------------------- diff --git a/doc/build/faq/metadata_schema.rst b/doc/build/faq/metadata_schema.rst index 2556db60c1a..2eab0033a5e 100644 --- a/doc/build/faq/metadata_schema.rst +++ b/doc/build/faq/metadata_schema.rst @@ -88,9 +88,12 @@ metadata creation sequence as a string, using this recipe:: from sqlalchemy import create_mock_engine + def dump(sql, *multiparams, **params): print(sql.compile(dialect=engine.dialect)) - engine = create_mock_engine('postgresql://', dump) + + + engine = create_mock_engine("postgresql://", dump) metadata_obj.create_all(engine, checkfirst=False) The `Alembic `_ tool also supports diff --git a/doc/build/faq/ormconfiguration.rst b/doc/build/faq/ormconfiguration.rst index f257f7ce998..1059354ed84 100644 --- a/doc/build/faq/ormconfiguration.rst +++ b/doc/build/faq/ormconfiguration.rst @@ -48,7 +48,7 @@ applied directly to the mapper:: class SomeClass(Base): __table__ = some_table_with_no_pk __mapper_args__ = { - 'primary_key':[some_table_with_no_pk.c.uid, some_table_with_no_pk.c.bar] + "primary_key": [some_table_with_no_pk.c.uid, some_table_with_no_pk.c.bar] } Better yet is when using fully declared table metadata, use the ``primary_key=True`` @@ -142,16 +142,18 @@ Given the example as follows:: Base = 
declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(A): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(Integer, ForeignKey('a.id')) + a_id = Column(Integer, ForeignKey("a.id")) As of SQLAlchemy version 0.9.5, the above condition is detected, and will warn that the ``id`` column of ``A`` and ``B`` is being combined under @@ -161,33 +163,33 @@ that a ``B`` object's primary key will always mirror that of its ``A``. A mapping which resolves this is as follows:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(A): - __tablename__ = 'b' + __tablename__ = "b" - b_id = Column('id', Integer, primary_key=True) - a_id = Column(Integer, ForeignKey('a.id')) + b_id = Column("id", Integer, primary_key=True) + a_id = Column(Integer, ForeignKey("a.id")) Suppose we did want ``A.id`` and ``B.id`` to be mirrors of each other, despite the fact that ``B.a_id`` is where ``A.id`` is related. We could combine them together using :func:`.column_property`:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) + class B(A): - __tablename__ = 'b' + __tablename__ = "b" # probably not what you want, but this is a demonstration id = column_property(Column(Integer, primary_key=True), A.id) - a_id = Column(Integer, ForeignKey('a.id')) - - + a_id = Column(Integer, ForeignKey("a.id")) I'm using Declarative and setting primaryjoin/secondaryjoin using an ``and_()`` or ``or_()``, and I am getting an error message about foreign keys. ------------------------------------------------------------------------------------------------------------------------------------------------------------------ @@ -197,21 +199,27 @@ Are you doing this?:: class MyClass(Base): # .... 
- foo = relationship("Dest", primaryjoin=and_("MyClass.id==Dest.foo_id", "MyClass.foo==Dest.bar")) + foo = relationship( + "Dest", primaryjoin=and_("MyClass.id==Dest.foo_id", "MyClass.foo==Dest.bar") + ) That's an ``and_()`` of two string expressions, which SQLAlchemy cannot apply any mapping towards. Declarative allows :func:`_orm.relationship` arguments to be specified as strings, which are converted into expression objects using ``eval()``. But this doesn't occur inside of an ``and_()`` expression - it's a special operation declarative applies only to the *entirety* of what's passed to primaryjoin or other arguments as a string:: class MyClass(Base): # .... - foo = relationship("Dest", primaryjoin="and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)") + foo = relationship( + "Dest", primaryjoin="and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)" + ) Or if the objects you need are already available, skip the strings:: class MyClass(Base): # .... - foo = relationship(Dest, primaryjoin=and_(MyClass.id==Dest.foo_id, MyClass.foo==Dest.bar)) + foo = relationship( + Dest, primaryjoin=and_(MyClass.id == Dest.foo_id, MyClass.foo == Dest.bar) + ) The same idea applies to all the other arguments, such as ``foreign_keys``:: diff --git a/doc/build/faq/performance.rst b/doc/build/faq/performance.rst index 781d6c79d34..91061c85927 100644 --- a/doc/build/faq/performance.rst +++ b/doc/build/faq/performance.rst @@ -215,16 +215,16 @@ using a recipe like the following:: logger = logging.getLogger("myapp.sqltime") logger.setLevel(logging.DEBUG) + @event.listens_for(Engine, "before_cursor_execute") - def before_cursor_execute(conn, cursor, statement, - parameters, context, executemany): - conn.info.setdefault('query_start_time', []).append(time.time()) + def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): + conn.info.setdefault("query_start_time", []).append(time.time()) logger.debug("Start Query: %s", statement) + @event.listens_for(Engine, 
"after_cursor_execute") - def after_cursor_execute(conn, cursor, statement, - parameters, context, executemany): - total = time.time() - conn.info['query_start_time'].pop(-1) + def after_cursor_execute(conn, cursor, statement, parameters, context, executemany): + total = time.time() - conn.info["query_start_time"].pop(-1) logger.debug("Query Complete!") logger.debug("Total Time: %f", total) @@ -255,6 +255,7 @@ Below is a simple recipe which works profiling into a context manager:: import pstats import contextlib + @contextlib.contextmanager def profiled(): pr = cProfile.Profile() @@ -262,7 +263,7 @@ Below is a simple recipe which works profiling into a context manager:: yield pr.disable() s = io.StringIO() - ps = pstats.Stats(pr, stream=s).sort_stats('cumulative') + ps = pstats.Stats(pr, stream=s).sort_stats("cumulative") ps.print_stats() # uncomment this to see who's calling what # ps.print_callers() @@ -271,7 +272,7 @@ Below is a simple recipe which works profiling into a context manager:: To profile a section of code:: with profiled(): - Session.query(FooClass).filter(FooClass.somevalue==8).all() + Session.query(FooClass).filter(FooClass.somevalue == 8).all() The output of profiling can be used to give an idea where time is being spent. A section of profiling output looks like this:: @@ -357,12 +358,13 @@ this:: from sqlalchemy import TypeDecorator import time + class Foo(TypeDecorator): impl = String def process_result_value(self, value, thing): # intentionally add slowness for illustration purposes - time.sleep(.001) + time.sleep(0.001) return value the profiling output of this intentionally slow operation can be seen like this:: diff --git a/doc/build/faq/sessions.rst b/doc/build/faq/sessions.rst index 1145a408fad..c070781981b 100644 --- a/doc/build/faq/sessions.rst +++ b/doc/build/faq/sessions.rst @@ -91,12 +91,14 @@ does not properly handle the exception. 
For example:: from sqlalchemy.orm import sessionmaker from sqlalchemy.ext.declarative import declarative_base - Base = declarative_base(create_engine('sqlite://')) + Base = declarative_base(create_engine("sqlite://")) + class Foo(Base): - __tablename__ = 'foo' + __tablename__ = "foo" id = Column(Integer, primary_key=True) + Base.metadata.create_all() session = sessionmaker()() @@ -113,7 +115,6 @@ does not properly handle the exception. For example:: # continue using session without rolling back session.commit() - The usage of the :class:`.Session` should fit within a structure similar to this:: try: @@ -186,7 +187,7 @@ point of view there is still a transaction that is now in an inactive state. Given a block such as:: - sess = Session() # begins a logical transaction + sess = Session() # begins a logical transaction try: sess.flush() @@ -237,7 +238,7 @@ will **deduplicate the objects based on primary key**. That is, if we for example use the ``User`` mapping described at :ref:`ormtutorial_toplevel`, and we had a SQL query like the following:: - q = session.query(User).outerjoin(User.addresses).filter(User.name == 'jack') + q = session.query(User).outerjoin(User.addresses).filter(User.name == "jack") Above, the sample data used in the tutorial has two rows in the ``addresses`` table for the ``users`` row with the name ``'jack'``, primary key value 5. @@ -257,7 +258,9 @@ This is because when the :class:`_query.Query` object returns full entities, the are **deduplicated**. This does not occur if we instead request individual columns back:: - >>> session.query(User.id, User.name).outerjoin(User.addresses).filter(User.name == 'jack').all() + >>> session.query(User.id, User.name).outerjoin(User.addresses).filter( + ... User.name == "jack" + ... 
).all() [(5, 'jack'), (5, 'jack')] There are two main reasons the :class:`_query.Query` will deduplicate: @@ -338,6 +341,7 @@ one:: print("ITER!") return iter([1, 2, 3, 4, 5]) + list(Iterates()) output:: @@ -422,7 +426,7 @@ be performed for any :term:`persistent` object using :meth:`.Session.expire`:: o = Session.query(SomeClass).first() o.foo_id = 7 - Session.expire(o, ['foo']) # object must be persistent for this + Session.expire(o, ["foo"]) # object must be persistent for this foo_7 = Session.query(Foo).get(7) @@ -444,11 +448,10 @@ have meaning until the row is inserted; otherwise there is no row yet:: Session.flush() # emits INSERT # expire this because we already set .foo to None - Session.expire(o, ['foo']) + Session.expire(o, ["foo"]) assert new_obj.foo is foo_7 # now it loads - .. topic:: Attribute loading for non-persistent objects One variant on the "pending" behavior above is if we use the flag @@ -504,21 +507,21 @@ The function can be demonstrated as follows:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B", backref="a") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) - c_id = Column(ForeignKey('c.id')) + a_id = Column(ForeignKey("a.id")) + c_id = Column(ForeignKey("c.id")) c = relationship("C", backref="bs") class C(Base): - __tablename__ = 'c' + __tablename__ = "c" id = Column(Integer, primary_key=True) diff --git a/doc/build/faq/sqlexpressions.rst b/doc/build/faq/sqlexpressions.rst index 5dcf3e96ad2..287647a7936 100644 --- a/doc/build/faq/sqlexpressions.rst +++ b/doc/build/faq/sqlexpressions.rst @@ -19,7 +19,7 @@ function (note the Python ``print`` function also calls ``str()`` automatically if we don't use it explicitly):: >>> from sqlalchemy import table, column, select - >>> t = table('my_table', column('x')) + >>> t = table("my_table", column("x")) >>> statement = select(t) >>> 
print(str(statement)) SELECT my_table.x @@ -31,7 +31,7 @@ The ``str()`` builtin, or an equivalent, can be invoked on ORM as:: >>> from sqlalchemy import column - >>> print(column('x') == 'some value') + >>> print(column("x") == "some value") x = :x_1 Stringifying for Specific Databases @@ -59,6 +59,7 @@ instantiate a :class:`.Dialect` object directly, as below where we use a PostgreSQL dialect:: from sqlalchemy.dialects import postgresql + print(statement.compile(dialect=postgresql.dialect())) Note that any dialect can be assembled using :func:`_sa.create_engine` itself @@ -98,7 +99,7 @@ flag, passed to ``compile_kwargs``:: from sqlalchemy.sql import table, column, select - t = table('t', column('x')) + t = table("t", column("x")) s = select(t).where(t.c.x == 5) @@ -159,12 +160,14 @@ datatype:: Base = declarative_base() + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) data = Column(UUID) + stmt = select(A).where(A.data == uuid.uuid4()) Given the above model and statement which will compare a column to a single @@ -216,6 +219,7 @@ include: their positional order for the statement as compiled:: import re + e = create_engine("sqlite+pysqlite://") # will use qmark style, i.e. ? 
for param @@ -224,7 +228,7 @@ include: # params in positional order params = (repr(compiled.params[name]) for name in compiled.positiontup) - print(re.sub(r'\?', lambda m: next(params), str(compiled))) + print(re.sub(r"\?", lambda m: next(params), str(compiled))) The above snippet prints:: @@ -240,6 +244,7 @@ include: from sqlalchemy.ext.compiler import compiles from sqlalchemy.sql.expression import BindParameter + @compiles(BindParameter) def _render_literal_bindparam(element, compiler, use_my_literal_recipe=False, **kw): if not use_my_literal_recipe: @@ -250,6 +255,7 @@ include: # render the value directly return repr(element.value) + e = create_engine("postgresql+psycopg2://") print(stmt.compile(e, compile_kwargs={"use_my_literal_recipe": True})) @@ -265,6 +271,7 @@ include: from sqlalchemy import TypeDecorator + class UUIDStringify(TypeDecorator): impl = UUID @@ -275,6 +282,7 @@ include: or locally within the statement using :func:`_sql.type_coerce`, such as :: from sqlalchemy import type_coerce + stmt = select(A).where(type_coerce(A.data, UUIDStringify) == uuid.uuid4()) print(stmt.compile(e, compile_kwargs={"literal_binds": True})) @@ -331,7 +339,7 @@ in the same way, such as SQLite's positional form:: >>> e = create_engine("sqlite+pysqlite://") >>> compiled = stmt.compile(e, compile_kwargs={"render_postcompile": True}) >>> params = (repr(compiled.params[name]) for name in compiled.positiontup) - >>> print(re.sub(r'\?', lambda m: next(params), str(compiled))) + >>> print(re.sub(r"\?", lambda m: next(params), str(compiled))) SELECT a.id, a.data FROM a WHERE a.data IN (UUID('aa1944d6-9a5a-45d5-b8da-0ba1ef0a4f38'), UUID('a81920e6-15e2-4392-8a3c-d775ffa9ccd2'), UUID('b5574cdb-ff9b-49a3-be52-dbc89f087bfa')) @@ -414,13 +422,13 @@ I'm using op() to generate a custom operator and my parenthesis are not coming o The :meth:`.Operators.op` method allows one to create a custom database operator otherwise not known by SQLAlchemy:: - >>> 
print(column('q').op('->')(column('p'))) + >>> print(column("q").op("->")(column("p"))) q -> p However, when using it on the right side of a compound expression, it doesn't generate parenthesis as we expect:: - >>> print((column('q1') + column('q2')).op('->')(column('p'))) + >>> print((column("q1") + column("q2")).op("->")(column("p"))) q1 + q2 -> p Where above, we probably want ``(q1 + q2) -> p``. @@ -430,14 +438,14 @@ the :paramref:`.Operators.op.precedence` parameter, to a high number, where 100 is the maximum value, and the highest number used by any SQLAlchemy operator is currently 15:: - >>> print((column('q1') + column('q2')).op('->', precedence=100)(column('p'))) + >>> print((column("q1") + column("q2")).op("->", precedence=100)(column("p"))) (q1 + q2) -> p We can also usually force parenthesization around a binary expression (e.g. an expression that has left/right operands and an operator) using the :meth:`_expression.ColumnElement.self_group` method:: - >>> print((column('q1') + column('q2')).self_group().op('->')(column('p'))) + >>> print((column("q1") + column("q2")).self_group().op("->")(column("p"))) (q1 + q2) -> p Why are the parentheses rules like this? @@ -449,7 +457,7 @@ generate parenthesis based on groupings, it uses operator precedence and if the operator is known to be associative, so that parenthesis are generated minimally. Otherwise, an expression like:: - column('a') & column('b') & column('c') & column('d') + column("a") & column("b") & column("c") & column("d") would produce:: @@ -459,7 +467,7 @@ which is fine but would probably annoy people (and be reported as a bug). In other cases, it leads to things that are more likely to confuse databases or at the very least readability, such as:: - column('q', ARRAY(Integer, dimensions=2))[5][6] + column("q", ARRAY(Integer, dimensions=2))[5][6] would produce:: @@ -476,16 +484,16 @@ What if we defaulted the value of :paramref:`.Operators.op.precedence` to 100, e.g. the highest? 
Then this expression makes more parenthesis, but is otherwise OK, that is, these two are equivalent:: - >>> print((column('q') - column('y')).op('+', precedence=100)(column('z'))) + >>> print((column("q") - column("y")).op("+", precedence=100)(column("z"))) (q - y) + z - >>> print((column('q') - column('y')).op('+')(column('z'))) + >>> print((column("q") - column("y")).op("+")(column("z"))) q - y + z but these two are not:: - >>> print(column('q') - column('y').op('+', precedence=100)(column('z'))) + >>> print(column("q") - column("y").op("+", precedence=100)(column("z"))) q - y + z - >>> print(column('q') - column('y').op('+')(column('z'))) + >>> print(column("q") - column("y").op("+")(column("z"))) q - (y + z) For now, it's not clear that as long as we are doing parenthesization based on diff --git a/doc/build/faq/thirdparty.rst b/doc/build/faq/thirdparty.rst index 27c8fbf7434..4b8bb7c556c 100644 --- a/doc/build/faq/thirdparty.rst +++ b/doc/build/faq/thirdparty.rst @@ -28,17 +28,18 @@ by queries. This may be illustrated from code based on the following:: import numpy + class A(Base): __tablename__ = "a" id = Column(Integer, primary_key=True) data = Column(Integer) + # .. 
later session.add(A(data=numpy.int64(10))) session.commit() - In the latter case, the issue is due to the ``numpy.int64`` datatype overriding the ``__eq__()`` method and enforcing that the return type of an expression is ``numpy.True`` or ``numpy.False``, which breaks SQLAlchemy's expression @@ -47,9 +48,9 @@ expressions from Python equality comparisons:: >>> import numpy >>> from sqlalchemy import column, Integer - >>> print(column('x', Integer) == numpy.int64(10)) # works + >>> print(column("x", Integer) == numpy.int64(10)) # works x = :x_1 - >>> print(numpy.int64(10) == column('x', Integer)) # breaks + >>> print(numpy.int64(10) == column("x", Integer)) # breaks False These errors are both solved in the same way, which is that special numpy @@ -61,9 +62,7 @@ applying the Python ``int()`` function to types like ``numpy.int32`` and session.add(A(data=int(data))) - result = session.execute( - select(A.data).where(int(data) == A.data) - ) + result = session.execute(select(A.data).where(int(data) == A.data)) session.commit() @@ -72,4 +71,4 @@ applying the Python ``int()`` function to types like ``numpy.int32`` and SQL expression for WHERE/HAVING role expected, got True ------------------------------------------------------- -See :ref:`numpy_int64`. \ No newline at end of file +See :ref:`numpy_int64`. 
diff --git a/doc/build/glossary.rst b/doc/build/glossary.rst index 111adb13b9d..b7d5476e46c 100644 --- a/doc/build/glossary.rst +++ b/doc/build/glossary.rst @@ -74,7 +74,6 @@ Glossary # Session returns a Result that has ORM entities list_of_users = result.scalars().all() - reflection reflected In SQLAlchemy, this term refers to the feature of querying a database's @@ -191,7 +190,7 @@ Glossary dictionary is associated with a copy of the object, which contains key/value pairs significant to various internal systems, mostly within the ORM:: - some_column = Column('some_column', Integer) + some_column = Column("some_column", Integer) some_column_annotated = some_column._annotate({"entity": User}) The annotation system differs from the public dictionary :attr:`_schema.Column.info` @@ -265,7 +264,7 @@ Glossary on mapped classes. When a class is mapped as such:: class MyClass(Base): - __tablename__ = 'foo' + __tablename__ = "foo" id = Column(Integer, primary_key=True) data = Column(String) @@ -1062,16 +1061,17 @@ Glossary single department. A SQLAlchemy mapping might look like:: class Department(Base): - __tablename__ = 'department' + __tablename__ = "department" id = Column(Integer, primary_key=True) name = Column(String(30)) employees = relationship("Employee") + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(30)) - dep_id = Column(Integer, ForeignKey('department.id')) + dep_id = Column(Integer, ForeignKey("department.id")) .. seealso:: @@ -1113,15 +1113,16 @@ Glossary single department. 
A SQLAlchemy mapping might look like:: class Department(Base): - __tablename__ = 'department' + __tablename__ = "department" id = Column(Integer, primary_key=True) name = Column(String(30)) + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(30)) - dep_id = Column(Integer, ForeignKey('department.id')) + dep_id = Column(Integer, ForeignKey("department.id")) department = relationship("Department") .. seealso:: @@ -1146,16 +1147,17 @@ Glossary used in :term:`one to many` as follows:: class Department(Base): - __tablename__ = 'department' + __tablename__ = "department" id = Column(Integer, primary_key=True) name = Column(String(30)) employees = relationship("Employee", backref="department") + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(30)) - dep_id = Column(Integer, ForeignKey('department.id')) + dep_id = Column(Integer, ForeignKey("department.id")) A backref can be applied to any relationship, including one to many, many to one, and :term:`many to many`. 
@@ -1207,24 +1209,25 @@ Glossary specified using plain table metadata:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(30)) projects = relationship( "Project", - secondary=Table('employee_project', Base.metadata, - Column("employee_id", Integer, ForeignKey('employee.id'), - primary_key=True), - Column("project_id", Integer, ForeignKey('project.id'), - primary_key=True) - ), - backref="employees" - ) + secondary=Table( + "employee_project", + Base.metadata, + Column("employee_id", Integer, ForeignKey("employee.id"), primary_key=True), + Column("project_id", Integer, ForeignKey("project.id"), primary_key=True), + ), + backref="employees", + ) + class Project(Base): - __tablename__ = 'project' + __tablename__ = "project" id = Column(Integer, primary_key=True) name = Column(String(30)) @@ -1320,30 +1323,29 @@ Glossary A SQLAlchemy declarative mapping for the above might look like:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(30)) class Project(Base): - __tablename__ = 'project' + __tablename__ = "project" id = Column(Integer, primary_key=True) name = Column(String(30)) class EmployeeProject(Base): - __tablename__ = 'employee_project' + __tablename__ = "employee_project" - employee_id = Column(Integer, ForeignKey('employee.id'), primary_key=True) - project_id = Column(Integer, ForeignKey('project.id'), primary_key=True) + employee_id = Column(Integer, ForeignKey("employee.id"), primary_key=True) + project_id = Column(Integer, ForeignKey("project.id"), primary_key=True) role_name = Column(String(30)) project = relationship("Project", backref="project_employees") employee = relationship("Employee", backref="employee_projects") - Employees can be added to a project given a role name:: proj = Project(name="Client A") @@ -1351,10 +1353,12 @@ Glossary emp1 = Employee(name="emp1") emp2 = 
Employee(name="emp2") - proj.project_employees.extend([ - EmployeeProject(employee=emp1, role_name="tech lead"), - EmployeeProject(employee=emp2, role_name="account executive") - ]) + proj.project_employees.extend( + [ + EmployeeProject(employee=emp1, role_name="tech lead"), + EmployeeProject(employee=emp2, role_name="account executive"), + ] + ) .. seealso:: diff --git a/doc/build/intro.rst b/doc/build/intro.rst index 4f1b64d15b4..2d8ac407dea 100644 --- a/doc/build/intro.rst +++ b/doc/build/intro.rst @@ -203,7 +203,7 @@ Python prompt like this: .. sourcecode:: python+sql >>> import sqlalchemy - >>> sqlalchemy.__version__ # doctest: +SKIP + >>> sqlalchemy.__version__ # doctest: +SKIP 1.4.0 Next Steps diff --git a/doc/build/orm/basic_relationships.rst b/doc/build/orm/basic_relationships.rst index ad57d4ca079..6ca4de39c61 100644 --- a/doc/build/orm/basic_relationships.rst +++ b/doc/build/orm/basic_relationships.rst @@ -304,7 +304,6 @@ for each :func:`_orm.relationship` specify the common association table:: "Parent", secondary=association_table, back_populates="children" ) - When using the :paramref:`_orm.relationship.backref` parameter instead of :paramref:`_orm.relationship.back_populates`, the backref will automatically use the same :paramref:`_orm.relationship.secondary` argument for the @@ -321,9 +320,7 @@ reverse relationship:: class Parent(Base): __tablename__ = "left" id = Column(Integer, primary_key=True) - children = relationship( - "Child", secondary=association_table, backref="parents" - ) + children = relationship("Child", secondary=association_table, backref="parents") class Child(Base): diff --git a/doc/build/orm/cascades.rst b/doc/build/orm/cascades.rst index 3c1180404c1..7cfd5d19dd5 100644 --- a/doc/build/orm/cascades.rst +++ b/doc/build/orm/cascades.rst @@ -109,10 +109,10 @@ and added to another:: >>> user1 = sess1.query(User).filter_by(id=1).first() >>> address1 = user1.addresses[0] - >>> sess1.close() # user1, address1 no longer associated with 
sess1 + >>> sess1.close() # user1, address1 no longer associated with sess1 >>> user1.addresses.remove(address1) # address1 no longer associated with user1 >>> sess2 = Session() - >>> sess2.add(user1) # ... but it still gets added to the new session, + >>> sess2.add(user1) # ... but it still gets added to the new session, >>> address1 in sess2 # because it's still "pending" for flush True @@ -588,9 +588,9 @@ default takes place on attribute change events emitted from backrefs. This is probably a confusing statement more easily described through demonstration; it means that, given a mapping such as this:: - mapper_registry.map_imperatively(Order, order_table, properties={ - 'items' : relationship(Item, backref='order') - }) + mapper_registry.map_imperatively( + Order, order_table, properties={"items": relationship(Item, backref="order")} + ) If an ``Order`` is already in the session, and is assigned to the ``order`` attribute of an ``Item``, the backref appends the ``Item`` to the ``items`` @@ -611,9 +611,11 @@ place:: This behavior can be disabled using the :paramref:`_orm.relationship.cascade_backrefs` flag:: - mapper_registry.map_imperatively(Order, order_table, properties={ - 'items' : relationship(Item, backref='order', cascade_backrefs=False) - }) + mapper_registry.map_imperatively( + Order, + order_table, + properties={"items": relationship(Item, backref="order", cascade_backrefs=False)}, + ) So above, the assignment of ``i1.order = o1`` will append ``i1`` to the ``items`` collection of ``o1``, but will not add ``i1`` to the session. You can, of @@ -628,11 +630,17 @@ parameter may be set to ``False`` on the backref side by using the :func:`_orm.backref` function instead of a string. 
For example, the above relationship could be declared:: - mapper_registry.map_imperatively(Order, order_table, properties={ - 'items' : relationship( - Item, backref=backref('order', cascade_backrefs=False), cascade_backrefs=False - ) - }) + mapper_registry.map_imperatively( + Order, + order_table, + properties={ + "items": relationship( + Item, + backref=backref("order", cascade_backrefs=False), + cascade_backrefs=False, + ) + }, + ) This sets the ``cascade_backrefs=False`` behavior on both relationships. @@ -700,6 +708,7 @@ illustrated in the example below:: addresses = relationship("Address", cascade="all, delete-orphan") + # ... del user.addresses[1] diff --git a/doc/build/orm/collections.rst b/doc/build/orm/collections.rst index 800d2613bdc..da50b3f8dbf 100644 --- a/doc/build/orm/collections.rst +++ b/doc/build/orm/collections.rst @@ -467,16 +467,21 @@ interface are detected and instrumented via duck-typing: class ListLike(object): def __init__(self): self.data = [] + def append(self, item): self.data.append(item) + def remove(self, item): self.data.remove(item) + def extend(self, items): self.data.extend(items) + def __iter__(self): return iter(self.data) + def foo(self): - return 'foo' + return "foo" ``append``, ``remove``, and ``extend`` are known list-like methods, and will be instrumented automatically. ``__iter__`` is not a mutator method and won't @@ -491,10 +496,13 @@ explicit about the interface you are implementing by providing an def __init__(self): self.data = set() + def append(self, item): self.data.add(item) + def remove(self, item): self.data.remove(item) + def __iter__(self): return iter(self.data) @@ -522,6 +530,7 @@ get the job done. from sqlalchemy.orm.collections import collection + class SetLike(object): __emulates__ = set @@ -580,6 +589,7 @@ collection support to other classes. 
It uses a keying function to delegate to from sqlalchemy.util import OrderedDict from sqlalchemy.orm.collections import MappedCollection + class NodeMap(OrderedDict, MappedCollection): """Holds 'Node' objects, keyed by the 'name' attribute with insert order maintained.""" @@ -643,6 +653,7 @@ to restrict the decorations to just your usage in relationships. For example: class MyAwesomeList(some.great.library.AwesomeList): pass + # ... relationship(..., collection_class=MyAwesomeList) The ORM uses this approach for built-ins, quietly substituting a trivial diff --git a/doc/build/orm/composites.rst b/doc/build/orm/composites.rst index 181993db5c0..670ae871fd1 100644 --- a/doc/build/orm/composites.rst +++ b/doc/build/orm/composites.rst @@ -24,11 +24,7 @@ A simple example represents pairs of columns as a ``Point`` object. return f"Point(x={self.x!r}, y={self.y!r})" def __eq__(self, other): - return ( - isinstance(other, Point) - and other.x == self.x - and other.y == self.y - ) + return isinstance(other, Point) and other.x == self.x and other.y == self.y def __ne__(self, other): return not self.__eq__(other) @@ -180,11 +176,7 @@ itself be a composite object, which is then mapped to a class ``HasVertex``:: return f"Point(x={self.x!r}, y={self.y!r})" def __eq__(self, other): - return ( - isinstance(other, Point) - and other.x == self.x - and other.y == self.y - ) + return isinstance(other, Point) and other.x == self.x and other.y == self.y def __ne__(self, other): return not self.__eq__(other) @@ -201,10 +193,7 @@ itself be a composite object, which is then mapped to a class ``HasVertex``:: return Vertex(Point(x1, y1), Point(x2, y2)) def __composite_values__(self): - return ( - self.start.__composite_values__() - + self.end.__composite_values__() - ) + return self.start.__composite_values__() + self.end.__composite_values__() class HasVertex(Base): @@ -224,7 +213,10 @@ We can then use the above mapping as:: s.add(hv) s.commit() - hv = s.query(HasVertex).filter( - 
HasVertex.vertex == Vertex(Point(1, 2), Point(3, 4))).first() + hv = ( + s.query(HasVertex) + .filter(HasVertex.vertex == Vertex(Point(1, 2), Point(3, 4))) + .first() + ) print(hv.vertex.start) print(hv.vertex.end) diff --git a/doc/build/orm/contextual.rst b/doc/build/orm/contextual.rst index 102ea50d885..9fadf6c732b 100644 --- a/doc/build/orm/contextual.rst +++ b/doc/build/orm/contextual.rst @@ -253,6 +253,7 @@ this in conjunction with a hypothetical event marker provided by the web framewo Session = scoped_session(sessionmaker(bind=some_engine), scopefunc=get_current_request) + @on_request_end def remove_session(req): Session.remove() diff --git a/doc/build/orm/dataclasses.rst b/doc/build/orm/dataclasses.rst index fa37e011e58..4e9943e76ee 100644 --- a/doc/build/orm/dataclasses.rst +++ b/doc/build/orm/dataclasses.rst @@ -173,9 +173,7 @@ association:: __tablename__ = "user" __sa_dataclass_metadata_key__ = "sa" - id: int = field( - init=False, metadata={"sa": Column(Integer, primary_key=True)} - ) + id: int = field(init=False, metadata={"sa": Column(Integer, primary_key=True)}) name: str = field(default=None, metadata={"sa": Column(String(50))}) fullname: str = field(default=None, metadata={"sa": Column(String(50))}) nickname: str = field(default=None, metadata={"sa": Column(String(12))}) @@ -189,15 +187,9 @@ association:: class Address: __tablename__ = "address" __sa_dataclass_metadata_key__ = "sa" - id: int = field( - init=False, metadata={"sa": Column(Integer, primary_key=True)} - ) - user_id: int = field( - init=False, metadata={"sa": Column(ForeignKey("user.id"))} - ) - email_address: str = field( - default=None, metadata={"sa": Column(String(50))} - ) + id: int = field(init=False, metadata={"sa": Column(Integer, primary_key=True)}) + user_id: int = field(init=False, metadata={"sa": Column(ForeignKey("user.id"))}) + email_address: str = field(default=None, metadata={"sa": Column(String(50))}) .. 
_orm_imperative_dataclasses: @@ -231,6 +223,7 @@ variables:: mapper_registry = registry() + @dataclass class User: id: int = field(init=False) @@ -239,34 +232,40 @@ variables:: nickname: str = None addresses: List[Address] = field(default_factory=list) + @dataclass class Address: id: int = field(init=False) user_id: int = field(init=False) email_address: str = None + metadata_obj = MetaData() user = Table( - 'user', + "user", metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String(50)), - Column('fullname', String(50)), - Column('nickname', String(12)), + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("fullname", String(50)), + Column("nickname", String(12)), ) address = Table( - 'address', + "address", metadata_obj, - Column('id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey('user.id')), - Column('email_address', String(50)), + Column("id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.id")), + Column("email_address", String(50)), ) - mapper_registry.map_imperatively(User, user, properties={ - 'addresses': relationship(Address, backref='user', order_by=address.c.id), - }) + mapper_registry.map_imperatively( + User, + user, + properties={ + "addresses": relationship(Address, backref="user", order_by=address.c.id), + }, + ) mapper_registry.map_imperatively(Address, address) @@ -302,9 +301,7 @@ came from a mixin that is itself a dataclass, the form would be:: __sa_dataclass_metadata_key__ = "sa" - id: int = field( - init=False, metadata={"sa": Column(Integer, primary_key=True)} - ) + id: int = field(init=False, metadata={"sa": Column(Integer, primary_key=True)}) addresses: List[Address] = field( default_factory=list, metadata={"sa": lambda: relationship("Address")} @@ -315,15 +312,11 @@ came from a mixin that is itself a dataclass, the form would be:: class AddressMixin: __tablename__ = "address" __sa_dataclass_metadata_key__ = "sa" - id: int = field( - 
init=False, metadata={"sa": Column(Integer, primary_key=True)} - ) + id: int = field(init=False, metadata={"sa": Column(Integer, primary_key=True)}) user_id: int = field( init=False, metadata={"sa": lambda: Column(ForeignKey("user.id"))} ) - email_address: str = field( - default=None, metadata={"sa": Column(String(50))} - ) + email_address: str = field(default=None, metadata={"sa": Column(String(50))}) @mapper_registry.mapped @@ -422,6 +415,7 @@ object is declared inline with the declarative class. The } } + @mapper_registry.mapped @define(slots=False) class Address: @@ -436,7 +430,6 @@ object is declared inline with the declarative class. The user_id: int email_address: Optional[str] - .. note:: The ``attrs`` ``slots=True`` option, which enables ``__slots__`` on a mapped class, cannot be used with SQLAlchemy mappings without fully implementing alternative @@ -469,6 +462,7 @@ as well:: mapper_registry = registry() + @define(slots=False) class User: id: int @@ -477,34 +471,40 @@ as well:: nickname: str addresses: List[Address] + @define(slots=False) class Address: id: int user_id: int email_address: Optional[str] + metadata_obj = MetaData() user = Table( - 'user', + "user", metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', String(50)), - Column('fullname', String(50)), - Column('nickname', String(12)), + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("fullname", String(50)), + Column("nickname", String(12)), ) address = Table( - 'address', + "address", metadata_obj, - Column('id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey('user.id')), - Column('email_address', String(50)), + Column("id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.id")), + Column("email_address", String(50)), ) - mapper_registry.map_imperatively(User, user, properties={ - 'addresses': relationship(Address, backref='user', order_by=address.c.id), - }) + mapper_registry.map_imperatively( + 
User, + user, + properties={ + "addresses": relationship(Address, backref="user", order_by=address.c.id), + }, + ) mapper_registry.map_imperatively(Address, address) diff --git a/doc/build/orm/declarative_config.rst b/doc/build/orm/declarative_config.rst index 55bf0f74c79..3a811ed82ec 100644 --- a/doc/build/orm/declarative_config.rst +++ b/doc/build/orm/declarative_config.rst @@ -117,9 +117,7 @@ hybrid table style:: Column("lastname", String(50)), ) - fullname = column_property( - __table__.c.firstname + " " + __table__.c.lastname - ) + fullname = column_property(__table__.c.firstname + " " + __table__.c.lastname) addresses = relationship("Address", back_populates="user") @@ -182,14 +180,12 @@ particular columns as part of what the ORM should consider to be a primary key for the class, independently of schema-level primary key constraints:: class GroupUsers(Base): - __tablename__ = 'group_users' + __tablename__ = "group_users" user_id = Column(String(40)) group_id = Column(String(40)) - __mapper_args__ = { - "primary_key": [user_id, group_id] - } + __mapper_args__ = {"primary_key": [user_id, group_id]} .. seealso:: @@ -246,7 +242,6 @@ configuring a single-table inheritance mapping:: polymorphic_identity="employee", ) - .. 
seealso:: :ref:`single_inheritance` - background on the ORM single table inheritance @@ -282,21 +277,23 @@ collection:: def __mapper_args__(cls): return { "exclude_properties": [ - column.key for column in cls.__table__.c if - column.info.get("exclude", False) + column.key + for column in cls.__table__.c + if column.info.get("exclude", False) ] } + Base = declarative_base() + class SomeClass(ExcludeColsWFlag, Base): - __tablename__ = 'some_table' + __tablename__ = "some_table" id = Column(Integer, primary_key=True) data = Column(String) not_needed = Column(String, info={"exclude": True}) - Above, the ``ExcludeColsWFlag`` mixin provides a per-class ``__mapper_args__`` hook that will scan for :class:`.Column` objects that include the key/value ``'exclude': True`` passed to the :paramref:`.Column.info` parameter, and then @@ -323,7 +320,7 @@ assumed to be completed and the 'configure' step has finished:: class MyClass(Base): @classmethod def __declare_last__(cls): - "" + """""" # do something with mappings ``__declare_first__()`` @@ -335,7 +332,7 @@ configuration via the :meth:`.MapperEvents.before_configured` event:: class MyClass(Base): @classmethod def __declare_first__(cls): - "" + """""" # do something before mappings are configured .. 
versionadded:: 0.9.3 diff --git a/doc/build/orm/declarative_mixins.rst b/doc/build/orm/declarative_mixins.rst index 21345ccdc9c..29ac56b97d6 100644 --- a/doc/build/orm/declarative_mixins.rst +++ b/doc/build/orm/declarative_mixins.rst @@ -154,10 +154,11 @@ patterns common to many classes can be defined as callables:: class ReferenceAddressMixin: @declared_attr def address_id(cls): - return Column(Integer, ForeignKey('address.id')) + return Column(Integer, ForeignKey("address.id")) + class User(ReferenceAddressMixin, Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) Where above, the ``address_id`` class-level callable is executed at the @@ -176,11 +177,12 @@ will resolve them at class construction time:: def type_(cls): return Column(String(50)) - __mapper_args__= {'polymorphic_on':type_} + __mapper_args__ = {"polymorphic_on": type_} + class MyModel(MyMixin, Base): - __tablename__='test' - id = Column(Integer, primary_key=True) + __tablename__ = "test" + id = Column(Integer, primary_key=True) .. _orm_declarative_mixins_relationships: @@ -199,7 +201,7 @@ reference a common target class via many-to-one:: class RefTargetMixin: @declared_attr def target_id(cls): - return Column('target_id', ForeignKey('target.id')) + return Column("target_id", ForeignKey("target.id")) @declared_attr def target(cls): @@ -220,7 +222,6 @@ reference a common target class via many-to-one:: __tablename__ = "target" id = Column(Integer, primary_key=True) - Using Advanced Relationship Arguments (e.g. ``primaryjoin``, etc.) 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -266,13 +267,11 @@ The condition above is resolved using a lambda:: class RefTargetMixin: @declared_attr def target_id(cls): - return Column('target_id', ForeignKey('target.id')) + return Column("target_id", ForeignKey("target.id")) @declared_attr def target(cls): - return relationship(Target, - primaryjoin=lambda: Target.id==cls.target_id - ) + return relationship(Target, primaryjoin=lambda: Target.id == cls.target_id) or alternatively, the string form (which ultimately generates a lambda):: @@ -284,9 +283,7 @@ or alternatively, the string form (which ultimately generates a lambda):: @declared_attr def target(cls): - return relationship( - Target, primaryjoin=f"Target.id=={cls.__name__}.target_id" - ) + return relationship(Target, primaryjoin=f"Target.id=={cls.__name__}.target_id") .. seealso:: @@ -526,9 +523,7 @@ establish it as part of ``__table_args__``:: @declared_attr def __table_args__(cls): - return ( - Index(f"test_idx_{cls.__tablename__}", "a", "b"), - ) + return (Index(f"test_idx_{cls.__tablename__}", "a", "b"),) class MyModel(MyMixin, Base): diff --git a/doc/build/orm/declarative_styles.rst b/doc/build/orm/declarative_styles.rst index 7a68d6fbb32..c1536a78ba3 100644 --- a/doc/build/orm/declarative_styles.rst +++ b/doc/build/orm/declarative_styles.rst @@ -23,7 +23,6 @@ The most common approach is to generate a "base" class using the # declarative base class Base = declarative_base() - The declarative base class may also be created from an existing :class:`_orm.registry`, by using the :meth:`_orm.registry.generate_base` method:: @@ -91,6 +90,7 @@ be produced in a fully explicit fashion using the mapper_registry = registry() + class Base(metaclass=DeclarativeMeta): __abstract__ = True diff --git a/doc/build/orm/declarative_tables.rst b/doc/build/orm/declarative_tables.rst index dad145c42f5..986205ec0d2 100644 --- a/doc/build/orm/declarative_tables.rst +++ 
b/doc/build/orm/declarative_tables.rst @@ -343,12 +343,11 @@ use a declarative hybrid mapping, passing the from sqlalchemy import Table from sqlalchemy.orm import declarative_base - engine = create_engine( - "postgresql+psycopg2://user:pass@hostname/my_existing_database" - ) + engine = create_engine("postgresql+psycopg2://user:pass@hostname/my_existing_database") Base = declarative_base() + class MyClass(Base): __table__ = Table( "mytable", @@ -365,16 +364,15 @@ objects at once, then refer to them from the :class:`.MetaData`:: from sqlalchemy import Table from sqlalchemy.orm import declarative_base - engine = create_engine( - "postgresql+psycopg2://user:pass@hostname/my_existing_database" - ) + engine = create_engine("postgresql+psycopg2://user:pass@hostname/my_existing_database") Base = declarative_base() Base.metadata.reflect(engine) + class MyClass(Base): - __table__ = Base.metadata.tables['mytable'] + __table__ = Base.metadata.tables["mytable"] .. seealso:: @@ -430,9 +428,7 @@ the ``Reflected.prepare`` method is called. 
The above mapping is not complete until we do so, given an :class:`_engine.Engine`:: - engine = create_engine( - "postgresql+psycopg2://user:pass@hostname/my_existing_database" - ) + engine = create_engine("postgresql+psycopg2://user:pass@hostname/my_existing_database") Reflected.prepare(engine) The purpose of the ``Reflected`` class is to define the scope at which diff --git a/doc/build/orm/extensions/associationproxy.rst b/doc/build/orm/extensions/associationproxy.rst index 6c7bfcee075..214338a0b7a 100644 --- a/doc/build/orm/extensions/associationproxy.rst +++ b/doc/build/orm/extensions/associationproxy.rst @@ -29,6 +29,7 @@ Each ``User`` can have any number of ``Keyword`` objects, and vice-versa Base = declarative_base() + class User(Base): __tablename__ = "user" id = Column(Integer, primary_key=True) @@ -232,7 +233,6 @@ objects that are obtained from the underlying ``UserKeywordAssociation`` element >>> user = User("log") >>> for kw in (Keyword("new_from_blammo"), Keyword("its_big")): ... user.keywords.append(kw) - ... >>> print(user.keywords) [Keyword('new_from_blammo'), Keyword('its_big')] @@ -441,21 +441,18 @@ transparently using the association proxy. In the example below, we illustrate usage of the assignment operator, also appropriately handled by the association proxy, to apply a dictionary value to the collection at once:: - >>> user = User('log') - >>> user.keywords = { - ... 'sk1':'kw1', - ... 'sk2':'kw2' - ... } + >>> user = User("log") + >>> user.keywords = {"sk1": "kw1", "sk2": "kw2"} >>> print(user.keywords) {'sk1': 'kw1', 'sk2': 'kw2'} - >>> user.keywords['sk3'] = 'kw3' - >>> del user.keywords['sk2'] + >>> user.keywords["sk3"] = "kw3" + >>> del user.keywords["sk2"] >>> print(user.keywords) {'sk1': 'kw1', 'sk3': 'kw3'} >>> # illustrate un-proxied usage - ... print(user.user_keyword_associations['sk3'].kw) + ... 
print(user.user_keyword_associations["sk3"].kw) <__main__.Keyword object at 0x12ceb90> One caveat with our example above is that because ``Keyword`` objects are created @@ -513,9 +510,7 @@ to a related object, as in the example mapping below:: ) # column-targeted association proxy - special_keys = association_proxy( - "user_keyword_associations", "special_key" - ) + special_keys = association_proxy("user_keyword_associations", "special_key") class UserKeywordAssociation(Base): @@ -531,7 +526,6 @@ to a related object, as in the example mapping below:: id = Column(Integer, primary_key=True) keyword = Column("keyword", String(64)) - The SQL generated takes the form of a correlated subquery against the EXISTS SQL operator so that it can be used in a WHERE clause without the need for additional modifications to the enclosing query. If the diff --git a/doc/build/orm/extensions/asyncio.rst b/doc/build/orm/extensions/asyncio.rst index c21d561b6bd..9ae1dfc2378 100644 --- a/doc/build/orm/extensions/asyncio.rst +++ b/doc/build/orm/extensions/asyncio.rst @@ -192,9 +192,7 @@ illustrates a complete example including mapper and session configuration:: # expire_on_commit=False will prevent attributes from being expired # after commit. 
- async_session = sessionmaker( - engine, expire_on_commit=False, class_=AsyncSession - ) + async_session = sessionmaker(engine, expire_on_commit=False, class_=AsyncSession) async with async_session() as session: async with session.begin(): @@ -595,7 +593,6 @@ constructs are illustrated below:: asyncio.run(go()) - The above example prints something along the lines of:: New DBAPI connection: > @@ -779,14 +776,14 @@ the usual ``await`` keywords are necessary, including for the :meth:`_asyncio.async_scoped_session.remove` method:: async def some_function(some_async_session, some_object): - # use the AsyncSession directly - some_async_session.add(some_object) + # use the AsyncSession directly + some_async_session.add(some_object) - # use the AsyncSession via the context-local proxy - await AsyncScopedSession.commit() + # use the AsyncSession via the context-local proxy + await AsyncScopedSession.commit() - # "remove" the current proxied AsyncSession for the local context - await AsyncScopedSession.remove() + # "remove" the current proxied AsyncSession for the local context + await AsyncScopedSession.remove() .. 
versionadded:: 1.4.19 diff --git a/doc/build/orm/extensions/baked.rst b/doc/build/orm/extensions/baked.rst index b3c21716a2a..60bf06b2a14 100644 --- a/doc/build/orm/extensions/baked.rst +++ b/doc/build/orm/extensions/baked.rst @@ -213,6 +213,7 @@ Our example becomes:: my_simple_cache = {} + def lookup(session, id_argument): if "my_key" not in my_simple_cache: query = session.query(Model).filter(Model.id == bindparam("id")) @@ -294,6 +295,7 @@ into a direct use of "bakery" as follows:: parameterized_query = bakery.bake(create_model_query) if include_frobnizzle: + def include_frobnizzle_in_query(query): return query.filter(Model.frobnizzle == True) @@ -362,9 +364,7 @@ statement compilation time:: bakery = baked.bakery() baked_query = bakery(lambda session: session.query(User)) - baked_query += lambda q: q.filter( - User.name.in_(bindparam("username", expanding=True)) - ) + baked_query += lambda q: q.filter(User.name.in_(bindparam("username", expanding=True))) result = baked_query.with_session(session).params(username=["ed", "fred"]).all() diff --git a/doc/build/orm/extensions/declarative/mixins.rst b/doc/build/orm/extensions/declarative/mixins.rst index cde4c12bd16..7a18f07a7f3 100644 --- a/doc/build/orm/extensions/declarative/mixins.rst +++ b/doc/build/orm/extensions/declarative/mixins.rst @@ -5,4 +5,4 @@ Mixin and Custom Base Classes ============================= -See :ref:`orm_mixins_toplevel` for this section. \ No newline at end of file +See :ref:`orm_mixins_toplevel` for this section. 
diff --git a/doc/build/orm/extensions/mypy.rst b/doc/build/orm/extensions/mypy.rst index 0d808f5c8aa..b0d89306502 100644 --- a/doc/build/orm/extensions/mypy.rst +++ b/doc/build/orm/extensions/mypy.rst @@ -116,9 +116,7 @@ mapping, using the typical example of the ``User`` class:: # a select() construct makes use of SQL expressions derived from the # User class itself - select_stmt = ( - select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s")) - ) + select_stmt = select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s")) Above, the steps that the Mypy extension can take include: @@ -161,9 +159,7 @@ following:: ) name: Mapped[Optional[str]] = Mapped._special_method(Column(String)) - def __init__( - self, id: Optional[int] = ..., name: Optional[str] = ... - ) -> None: + def __init__(self, id: Optional[int] = ..., name: Optional[str] = ...) -> None: ... @@ -171,10 +167,7 @@ following:: print(f"Username: {some_user.name}") - select_stmt = ( - select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s")) - ) - + select_stmt = select(User).where(User.id.in_([3, 4, 5])).where(User.name.contains("s")) The key steps which have been taken above include: @@ -452,9 +445,7 @@ applied explicitly:: id = Column(Integer, primary_key=True) name = Column(String) - addresses: Mapped[List["Address"]] = relationship( - "Address", back_populates="user" - ) + addresses: Mapped[List["Address"]] = relationship("Address", back_populates="user") class Address(Base): diff --git a/doc/build/orm/inheritance.rst b/doc/build/orm/inheritance.rst index 4fd3569be5b..9c64668da1c 100644 --- a/doc/build/orm/inheritance.rst +++ b/doc/build/orm/inheritance.rst @@ -641,6 +641,7 @@ almost the same way as we do other forms of inheritance mappings:: Base = declarative_base() + class Employee(ConcreteBase, Base): __tablename__ = "employee" id = Column(Integer, primary_key=True) @@ -749,6 +750,7 @@ base class with the ``__abstract__`` indicator:: Base = 
declarative_base() + class Employee(Base): __abstract__ = True @@ -817,6 +819,7 @@ class called :class:`.AbstractConcreteBase` which achieves this automatically:: "concrete": True, } + Base.registry.configure() Above, the :meth:`_orm.registry.configure` method is invoked, which will @@ -997,7 +1000,6 @@ mapping is illustrated below:: "concrete": True, } - Above, we use :func:`.polymorphic_union` in the same manner as before, except that we omit the ``employee`` table. diff --git a/doc/build/orm/inheritance_loading.rst b/doc/build/orm/inheritance_loading.rst index daf60b7f834..281a43a5c52 100644 --- a/doc/build/orm/inheritance_loading.rst +++ b/doc/build/orm/inheritance_loading.rst @@ -104,7 +104,7 @@ subclasses: entity = with_polymorphic(Employee, [Engineer, Manager]) # include columns for all mapped subclasses - entity = with_polymorphic(Employee, '*') + entity = with_polymorphic(Employee, "*") .. tip:: @@ -135,18 +135,15 @@ with the same name: .. sourcecode:: python+sql - engineer_employee = with_polymorphic( - Employee, [Engineer], aliased=True) - manager_employee = with_polymorphic( - Employee, [Manager], aliased=True) - - q = s.query(engineer_employee, manager_employee).\ - join( - manager_employee, - and_( - engineer_employee.id > manager_employee.id, - engineer_employee.name == manager_employee.name - ) + engineer_employee = with_polymorphic(Employee, [Engineer], aliased=True) + manager_employee = with_polymorphic(Employee, [Manager], aliased=True) + + q = s.query(engineer_employee, manager_employee).join( + manager_employee, + and_( + engineer_employee.id > manager_employee.id, + engineer_employee.name == manager_employee.name, + ), ) q.all() {opensql} @@ -195,18 +192,15 @@ is necessary: .. 
sourcecode:: python+sql - engineer_employee = with_polymorphic( - Employee, [Engineer], flat=True) - manager_employee = with_polymorphic( - Employee, [Manager], flat=True) - - q = s.query(engineer_employee, manager_employee).\ - join( - manager_employee, - and_( - engineer_employee.id > manager_employee.id, - engineer_employee.name == manager_employee.name - ) + engineer_employee = with_polymorphic(Employee, [Engineer], flat=True) + manager_employee = with_polymorphic(Employee, [Manager], flat=True) + + q = s.query(engineer_employee, manager_employee).join( + manager_employee, + and_( + engineer_employee.id > manager_employee.id, + engineer_employee.name == manager_employee.name, + ), ) q.all() {opensql} @@ -260,11 +254,11 @@ specific to ``Engineer`` as well as ``Manager`` in terms of ``eng_plus_manager`` eng_plus_manager = with_polymorphic(Employee, [Engineer, Manager]) query = session.query(eng_plus_manager).filter( - or_( - eng_plus_manager.Engineer.engineer_info=='x', - eng_plus_manager.Manager.manager_data=='y' - ) - ) + or_( + eng_plus_manager.Engineer.engineer_info == "x", + eng_plus_manager.Manager.manager_data == "y", + ) + ) A query as above would generate SQL resembling the following: @@ -307,15 +301,15 @@ default. 
We can add the parameter to our ``Employee`` mapping first introduced at :ref:`joined_inheritance`:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(50)) __mapper_args__ = { - 'polymorphic_identity':'employee', - 'polymorphic_on':type, - 'with_polymorphic': '*' + "polymorphic_identity": "employee", + "polymorphic_on": type, + "with_polymorphic": "*", } Above is a common setting for :paramref:`.mapper.with_polymorphic`, @@ -339,22 +333,17 @@ that they should individually participate in polymorphic loading by default using the :paramref:`.mapper.polymorphic_load` parameter:: class Engineer(Employee): - __tablename__ = 'engineer' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "engineer" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) engineer_info = Column(String(50)) - __mapper_args__ = { - 'polymorphic_identity':'engineer', - 'polymorphic_load': 'inline' - } + __mapper_args__ = {"polymorphic_identity": "engineer", "polymorphic_load": "inline"} + class Manager(Employee): - __tablename__ = 'manager' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "manager" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) manager_data = Column(String(50)) - __mapper_args__ = { - 'polymorphic_identity':'manager', - 'polymorphic_load': 'inline' - } + __mapper_args__ = {"polymorphic_identity": "manager", "polymorphic_load": "inline"} Setting the :paramref:`.mapper.polymorphic_load` parameter to the value ``"inline"`` means that the ``Engineer`` and ``Manager`` classes above @@ -374,14 +363,9 @@ that entity, so that the entity (and its subclasses) can be referred to directly, rather than using an alias object. 
For simple cases it might be considered to be more succinct:: - session.query(Employee).\ - with_polymorphic([Engineer, Manager]).\ - filter( - or_( - Engineer.engineer_info=='w', - Manager.manager_data=='q' - ) - ) + session.query(Employee).with_polymorphic([Engineer, Manager]).filter( + or_(Engineer.engineer_info == "w", Manager.manager_data == "q") + ) The :meth:`_query.Query.with_polymorphic` method has a more complicated job than the :func:`_orm.with_polymorphic` function, as it needs to correctly @@ -445,37 +429,35 @@ by default by specifying the :paramref:`.mapper.polymorphic_load` parameter, using the value ``"selectin"`` on a per-subclass basis:: class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) name = Column(String(50)) type = Column(String(50)) - __mapper_args__ = { - 'polymorphic_identity': 'employee', - 'polymorphic_on': type - } + __mapper_args__ = {"polymorphic_identity": "employee", "polymorphic_on": type} + class Engineer(Employee): - __tablename__ = 'engineer' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "engineer" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) engineer_name = Column(String(30)) __mapper_args__ = { - 'polymorphic_load': 'selectin', - 'polymorphic_identity': 'engineer', + "polymorphic_load": "selectin", + "polymorphic_identity": "engineer", } + class Manager(Employee): - __tablename__ = 'manager' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "manager" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) manager_name = Column(String(30)) __mapper_args__ = { - 'polymorphic_load': 'selectin', - 'polymorphic_identity': 'manager', + "polymorphic_load": "selectin", + "polymorphic_identity": "manager", } - Unlike when using :func:`_orm.with_polymorphic`, when using the :func:`_orm.selectin_polymorphic` style of loading, we do **not** have the ability to 
refer to the ``Engineer`` or ``Manager`` entities within our main @@ -491,8 +473,7 @@ loading via the :func:`_orm.joinedload` function:: from sqlalchemy.orm import selectin_polymorphic query = session.query(Employee).options( - selectin_polymorphic(Employee, [Manager, Engineer]), - joinedload(Manager.paperwork) + selectin_polymorphic(Employee, [Manager, Engineer]), joinedload(Manager.paperwork) ) Using the query above, we get three SELECT statements emitted, however @@ -541,24 +522,22 @@ a load of ``Manager`` also fully loads ``VicePresident`` subtypes at the same ti # use "Employee" example from the enclosing section + class Manager(Employee): - __tablename__ = 'manager' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "manager" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) manager_name = Column(String(30)) __mapper_args__ = { - 'polymorphic_load': 'selectin', - 'polymorphic_identity': 'manager', + "polymorphic_load": "selectin", + "polymorphic_identity": "manager", } + class VicePresident(Manager): vp_info = Column(String(30)) - __mapper_args__ = { - "polymorphic_load": "inline", - "polymorphic_identity": "vp" - } - + __mapper_args__ = {"polymorphic_load": "inline", "polymorphic_identity": "vp"} Above, we add a ``vp_info`` column to the ``manager`` table, local to the ``VicePresident`` subclass. This subclass is linked to the polymorphic @@ -592,8 +571,7 @@ set up, we could get the same result as follows:: manager_poly = with_polymorphic(Manager, [VicePresident]) - s.query(Employee).options( - selectin_polymorphic(Employee, [manager_poly])).all() + s.query(Employee).options(selectin_polymorphic(Employee, [manager_poly])).all() .. _inheritance_of_type: @@ -619,33 +597,35 @@ with a ``Company`` object. We'll add a ``company_id`` column to the .. 
sourcecode:: python class Company(Base): - __tablename__ = 'company' + __tablename__ = "company" id = Column(Integer, primary_key=True) name = Column(String(50)) - employees = relationship("Employee", - backref='company') + employees = relationship("Employee", backref="company") + class Employee(Base): - __tablename__ = 'employee' + __tablename__ = "employee" id = Column(Integer, primary_key=True) type = Column(String(20)) - company_id = Column(Integer, ForeignKey('company.id')) + company_id = Column(Integer, ForeignKey("company.id")) __mapper_args__ = { - 'polymorphic_on':type, - 'polymorphic_identity':'employee', + "polymorphic_on": type, + "polymorphic_identity": "employee", } + class Engineer(Employee): - __tablename__ = 'engineer' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "engineer" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) engineer_info = Column(String(50)) - __mapper_args__ = {'polymorphic_identity':'engineer'} + __mapper_args__ = {"polymorphic_identity": "engineer"} + class Manager(Employee): - __tablename__ = 'manager' - id = Column(Integer, ForeignKey('employee.id'), primary_key=True) + __tablename__ = "manager" + id = Column(Integer, ForeignKey("employee.id"), primary_key=True) manager_data = Column(String(50)) - __mapper_args__ = {'polymorphic_identity':'manager'} + __mapper_args__ = {"polymorphic_identity": "manager"} When querying from ``Company`` onto the ``Employee`` relationship, the :meth:`_query.Query.join` method as well as operators like :meth:`.PropComparator.any` @@ -656,34 +636,29 @@ against the ``Engineer`` class, we can tell those methods to join or subquery against the set of columns representing the subclass using the :meth:`~.orm.interfaces.PropComparator.of_type` operator:: - session.query(Company).\ - join(Company.employees.of_type(Engineer)).\ - filter(Engineer.engineer_info=='someinfo') + session.query(Company).join(Company.employees.of_type(Engineer)).filter( + 
Engineer.engineer_info == "someinfo" + ) Similarly, to join from ``Company`` to the polymorphic entity that includes both ``Engineer`` and ``Manager`` columns:: - manager_and_engineer = with_polymorphic( - Employee, [Manager, Engineer]) + manager_and_engineer = with_polymorphic(Employee, [Manager, Engineer]) - session.query(Company).\ - join(Company.employees.of_type(manager_and_engineer)).\ - filter( - or_( - manager_and_engineer.Engineer.engineer_info == 'someinfo', - manager_and_engineer.Manager.manager_data == 'somedata' - ) + session.query(Company).join(Company.employees.of_type(manager_and_engineer)).filter( + or_( + manager_and_engineer.Engineer.engineer_info == "someinfo", + manager_and_engineer.Manager.manager_data == "somedata", ) + ) The :meth:`.PropComparator.any` and :meth:`.PropComparator.has` operators also can be used with :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type`, such as when the embedded criterion is in terms of a subclass:: - session.query(Company).\ - filter( - Company.employees.of_type(Engineer). - any(Engineer.engineer_info=='someinfo') - ).all() + session.query(Company).filter( + Company.employees.of_type(Engineer).any(Engineer.engineer_info == "someinfo") + ).all() .. _eagerloading_polymorphic_subtypes: @@ -708,16 +683,11 @@ can be used to combine eager loading and :func:`_orm.with_polymorphic`, so that all sub-attributes of all referenced subtypes can be loaded:: - manager_and_engineer = with_polymorphic( - Employee, [Manager, Engineer], - flat=True) + manager_and_engineer = with_polymorphic(Employee, [Manager, Engineer], flat=True) - session.query(Company).\ - options( - joinedload( - Company.employees.of_type(manager_and_engineer) - ) - ) + session.query(Company).options( + joinedload(Company.employees.of_type(manager_and_engineer)) + ) .. 
note:: @@ -866,9 +836,7 @@ In our example from :ref:`single_inheritance`, the ``Manager`` mapping for examp class Manager(Employee): manager_data = Column(String(50)) - __mapper_args__ = { - 'polymorphic_identity':'manager' - } + __mapper_args__ = {"polymorphic_identity": "manager"} Above, there would be no ``Employee.manager_data`` attribute, even though the ``employee`` table has a ``manager_data`` column. @@ -914,13 +882,10 @@ inheritance in the case of single inheritance; it allows both for eager loading of subclass attributes as well as specification of subclasses in a query, just without the overhead of using OUTER JOIN:: - employee_poly = with_polymorphic(Employee, '*') + employee_poly = with_polymorphic(Employee, "*") q = session.query(employee_poly).filter( - or_( - employee_poly.name == 'a', - employee_poly.Manager.manager_data == 'b' - ) + or_(employee_poly.name == "a", employee_poly.Manager.manager_data == "b") ) Above, our query remains against a single table however we can refer to the diff --git a/doc/build/orm/join_conditions.rst b/doc/build/orm/join_conditions.rst index 509ccc98f39..e9ab6a39eef 100644 --- a/doc/build/orm/join_conditions.rst +++ b/doc/build/orm/join_conditions.rst @@ -25,8 +25,9 @@ class:: Base = declarative_base() + class Customer(Base): - __tablename__ = 'customer' + __tablename__ = "customer" id = Column(Integer, primary_key=True) name = Column(String) @@ -36,8 +37,9 @@ class:: billing_address = relationship("Address") shipping_address = relationship("Address") + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) street = Column(String) city = Column(String) @@ -64,7 +66,7 @@ by instructing for each one which foreign key column should be considered, and the appropriate form is as follows:: class Customer(Base): - __tablename__ = 'customer' + __tablename__ = "customer" id = Column(Integer, primary_key=True) name = Column(String) @@ -127,18 +129,21 @@ load those 
``Address`` objects which specify a city of "Boston":: Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) - boston_addresses = relationship("Address", - primaryjoin="and_(User.id==Address.user_id, " - "Address.city=='Boston')") + boston_addresses = relationship( + "Address", + primaryjoin="and_(User.id==Address.user_id, " "Address.city=='Boston')", + ) + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) - user_id = Column(Integer, ForeignKey('user.id')) + user_id = Column(Integer, ForeignKey("user.id")) street = Column(String) city = Column(String) @@ -208,19 +213,21 @@ type of the other:: Base = declarative_base() + class HostEntry(Base): - __tablename__ = 'host_entry' + __tablename__ = "host_entry" id = Column(Integer, primary_key=True) ip_address = Column(INET) content = Column(String(50)) # relationship() using explicit foreign_keys, remote_side - parent_host = relationship("HostEntry", - primaryjoin=ip_address == cast(content, INET), - foreign_keys=content, - remote_side=ip_address - ) + parent_host = relationship( + "HostEntry", + primaryjoin=ip_address == cast(content, INET), + foreign_keys=content, + remote_side=ip_address, + ) The above relationship will produce a join like:: @@ -241,8 +248,9 @@ SQL expressions:: from sqlalchemy.orm import foreign, remote + class HostEntry(Base): - __tablename__ = 'host_entry' + __tablename__ = "host_entry" id = Column(Integer, primary_key=True) ip_address = Column(INET) @@ -250,11 +258,10 @@ SQL expressions:: # relationship() using explicit foreign() and remote() annotations # in lieu of separate arguments - parent_host = relationship("HostEntry", - primaryjoin=remote(ip_address) == \ - cast(foreign(content), INET), - ) - + parent_host = relationship( + "HostEntry", + primaryjoin=remote(ip_address) == cast(foreign(content), INET), + ) .. 
_relationship_custom_operator: @@ -273,18 +280,20 @@ A comparison like the above may be used directly with a :func:`_orm.relationship`:: class IPA(Base): - __tablename__ = 'ip_address' + __tablename__ = "ip_address" id = Column(Integer, primary_key=True) v4address = Column(INET) - network = relationship("Network", - primaryjoin="IPA.v4address.bool_op('<<')" - "(foreign(Network.v4representation))", - viewonly=True - ) + network = relationship( + "Network", + primaryjoin="IPA.v4address.bool_op('<<')" "(foreign(Network.v4representation))", + viewonly=True, + ) + + class Network(Base): - __tablename__ = 'network' + __tablename__ = "network" id = Column(Integer, primary_key=True) v4representation = Column(CIDR) @@ -317,6 +326,7 @@ two expressions. The below example illustrates this with the from sqlalchemy import Column, Integer, func from sqlalchemy.orm import relationship, foreign + class Polygon(Base): __tablename__ = "polygon" id = Column(Integer, primary_key=True) @@ -327,6 +337,7 @@ two expressions. 
The below example illustrates this with the viewonly=True, ) + class Point(Base): __tablename__ = "point" id = Column(Integer, primary_key=True) @@ -356,35 +367,34 @@ for both; then to make ``Article`` refer to ``Writer`` as well, ``Article.magazine`` and ``Article.writer``:: class Magazine(Base): - __tablename__ = 'magazine' + __tablename__ = "magazine" id = Column(Integer, primary_key=True) class Article(Base): - __tablename__ = 'article' + __tablename__ = "article" article_id = Column(Integer) - magazine_id = Column(ForeignKey('magazine.id')) + magazine_id = Column(ForeignKey("magazine.id")) writer_id = Column() magazine = relationship("Magazine") writer = relationship("Writer") __table_args__ = ( - PrimaryKeyConstraint('article_id', 'magazine_id'), + PrimaryKeyConstraint("article_id", "magazine_id"), ForeignKeyConstraint( - ['writer_id', 'magazine_id'], - ['writer.id', 'writer.magazine_id'] + ["writer_id", "magazine_id"], ["writer.id", "writer.magazine_id"] ), ) class Writer(Base): - __tablename__ = 'writer' + __tablename__ = "writer" id = Column(Integer, primary_key=True) - magazine_id = Column(ForeignKey('magazine.id'), primary_key=True) + magazine_id = Column(ForeignKey("magazine.id"), primary_key=True) magazine = relationship("Magazine") When the above mapping is configured, we will see this warning emitted:: @@ -431,7 +441,7 @@ To get just #1 and #2, we could specify only ``Article.writer_id`` as the class Article(Base): # ... - writer = relationship("Writer", foreign_keys='Article.writer_id') + writer = relationship("Writer", foreign_keys="Article.writer_id") However, this has the effect of ``Article.writer`` not taking ``Article.magazine_id`` into account when querying against ``Writer``: @@ -456,7 +466,8 @@ annotating with :func:`_orm.foreign`:: writer = relationship( "Writer", primaryjoin="and_(Writer.id == foreign(Article.writer_id), " - "Writer.magazine_id == Article.magazine_id)") + "Writer.magazine_id == Article.magazine_id)", + ) .. 
versionchanged:: 1.0.0 the ORM will attempt to warn when a column is used as the synchronization target from more than one relationship @@ -482,16 +493,16 @@ is considered to be "many to one". For the comparison we'll use here, we'll be dealing with collections so we keep things configured as "one to many":: class Element(Base): - __tablename__ = 'element' + __tablename__ = "element" path = Column(String, primary_key=True) - descendants = relationship('Element', - primaryjoin= - remote(foreign(path)).like( - path.concat('/%')), - viewonly=True, - order_by=path) + descendants = relationship( + "Element", + primaryjoin=remote(foreign(path)).like(path.concat("/%")), + viewonly=True, + order_by=path, + ) Above, if given an ``Element`` object with a path attribute of ``"/foo/bar2"``, we seek for a load of ``Element.descendants`` to look like:: @@ -530,20 +541,24 @@ is when establishing a many-to-many relationship from a class to itself, as show Base = declarative_base() - node_to_node = Table("node_to_node", Base.metadata, + node_to_node = Table( + "node_to_node", + Base.metadata, Column("left_node_id", Integer, ForeignKey("node.id"), primary_key=True), - Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True) + Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True), ) + class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = Column(Integer, primary_key=True) label = Column(String) - right_nodes = relationship("Node", - secondary=node_to_node, - primaryjoin=id==node_to_node.c.left_node_id, - secondaryjoin=id==node_to_node.c.right_node_id, - backref="left_nodes" + right_nodes = relationship( + "Node", + secondary=node_to_node, + primaryjoin=id == node_to_node.c.left_node_id, + secondaryjoin=id == node_to_node.c.right_node_id, + backref="left_nodes", ) Where above, SQLAlchemy can't know automatically which columns should connect @@ -561,14 +576,15 @@ When referring to a plain :class:`_schema.Table` object in a 
declarative string, use the string name of the table as it is present in the :class:`_schema.MetaData`:: class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = Column(Integer, primary_key=True) label = Column(String) - right_nodes = relationship("Node", - secondary="node_to_node", - primaryjoin="Node.id==node_to_node.c.left_node_id", - secondaryjoin="Node.id==node_to_node.c.right_node_id", - backref="left_nodes" + right_nodes = relationship( + "Node", + secondary="node_to_node", + primaryjoin="Node.id==node_to_node.c.left_node_id", + secondaryjoin="Node.id==node_to_node.c.right_node_id", + backref="left_nodes", ) .. warning:: When passed as a Python-evaluable string, the @@ -588,26 +604,38 @@ to ``node.c.id``:: metadata_obj = MetaData() mapper_registry = registry() - node_to_node = Table("node_to_node", metadata_obj, + node_to_node = Table( + "node_to_node", + metadata_obj, Column("left_node_id", Integer, ForeignKey("node.id"), primary_key=True), - Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True) + Column("right_node_id", Integer, ForeignKey("node.id"), primary_key=True), ) - node = Table("node", metadata_obj, - Column('id', Integer, primary_key=True), - Column('label', String) + node = Table( + "node", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("label", String), ) + + class Node(object): pass - mapper_registry.map_imperatively(Node, node, properties={ - 'right_nodes':relationship(Node, - secondary=node_to_node, - primaryjoin=node.c.id==node_to_node.c.left_node_id, - secondaryjoin=node.c.id==node_to_node.c.right_node_id, - backref="left_nodes" - )}) + mapper_registry.map_imperatively( + Node, + node, + properties={ + "right_nodes": relationship( + Node, + secondary=node_to_node, + primaryjoin=node.c.id == node_to_node.c.left_node_id, + secondaryjoin=node.c.id == node_to_node.c.right_node_id, + backref="left_nodes", + ) + }, + ) Note that in both examples, the :paramref:`_orm.relationship.backref` 
keyword specifies a ``left_nodes`` backref - when @@ -649,35 +677,38 @@ target consisting of multiple tables. Below is an example of such a join condition (requires version 0.9.2 at least to function as is):: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) - b_id = Column(ForeignKey('b.id')) + b_id = Column(ForeignKey("b.id")) + + d = relationship( + "D", + secondary="join(B, D, B.d_id == D.id)." "join(C, C.d_id == D.id)", + primaryjoin="and_(A.b_id == B.id, A.id == C.a_id)", + secondaryjoin="D.id == B.d_id", + uselist=False, + viewonly=True, + ) - d = relationship("D", - secondary="join(B, D, B.d_id == D.id)." - "join(C, C.d_id == D.id)", - primaryjoin="and_(A.b_id == B.id, A.id == C.a_id)", - secondaryjoin="D.id == B.d_id", - uselist=False, - viewonly=True - ) class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - d_id = Column(ForeignKey('d.id')) + d_id = Column(ForeignKey("d.id")) + class C(Base): - __tablename__ = 'c' + __tablename__ = "c" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) - d_id = Column(ForeignKey('d.id')) + a_id = Column(ForeignKey("a.id")) + d_id = Column(ForeignKey("d.id")) + class D(Base): - __tablename__ = 'd' + __tablename__ = "d" id = Column(Integer, primary_key=True) @@ -749,33 +780,37 @@ entities ``C`` and ``D``, which also must have rows that line up with the rows in both ``A`` and ``B`` simultaneously:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) - b_id = Column(ForeignKey('b.id')) + b_id = Column(ForeignKey("b.id")) + class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) + class C(Base): - __tablename__ = 'c' + __tablename__ = "c" id = Column(Integer, primary_key=True) - a_id = Column(ForeignKey('a.id')) + a_id = Column(ForeignKey("a.id")) some_c_value = Column(String) + class D(Base): - __tablename__ = 'd' + 
__tablename__ = "d" id = Column(Integer, primary_key=True) - c_id = Column(ForeignKey('c.id')) - b_id = Column(ForeignKey('b.id')) + c_id = Column(ForeignKey("c.id")) + b_id = Column(ForeignKey("b.id")) some_d_value = Column(String) + # 1. set up the join() as a variable, so we can refer # to it in the mapping multiple times. j = join(B, D, D.b_id == B.id).join(C, C.id == D.c_id) @@ -827,9 +862,10 @@ so in terms of ``B_viacd_subquery`` rather than ``B`` directly: .. sourcecode:: python+sql ( - sess.query(A).join(A.b). - filter(B_viacd_subquery.some_b_column == "some b"). - order_by(B_viacd_subquery.id) + sess.query(A) + .join(A.b) + .filter(B_viacd_subquery.some_b_column == "some b") + .order_by(B_viacd_subquery.id) ).all() {opensql}SELECT a.id AS a_id, a.b_id AS a_b_id @@ -851,35 +887,32 @@ illustrates a non-primary mapper relationship that will load the first ten items for each collection:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) a_id = Column(ForeignKey("a.id")) + partition = select( - B, - func.row_number().over( - order_by=B.id, partition_by=B.a_id - ).label('index') + B, func.row_number().over(order_by=B.id, partition_by=B.a_id).label("index") ).alias() partitioned_b = aliased(B, partition) A.partitioned_bs = relationship( - partitioned_b, - primaryjoin=and_(partitioned_b.a_id == A.id, partition.c.index < 10) + partitioned_b, primaryjoin=and_(partitioned_b.a_id == A.id, partition.c.index < 10) ) We can use the above ``partitioned_bs`` relationship with most of the loader strategies, such as :func:`.selectinload`:: for a1 in s.query(A).options(selectinload(A.partitioned_bs)): - print(a1.partitioned_bs) # <-- will be no more than ten objects + print(a1.partitioned_bs) # <-- will be no more than ten objects Where above, the "selectinload" query looks like: @@ -921,7 +954,7 @@ conjunction with 
:class:`_query.Query` as follows: .. sourcecode:: python class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) @property @@ -934,4 +967,4 @@ of special Python attributes. .. seealso:: - :ref:`mapper_hybrids` \ No newline at end of file + :ref:`mapper_hybrids` diff --git a/doc/build/orm/loading.rst b/doc/build/orm/loading.rst index 0aca6cd0c97..fdb27806f47 100644 --- a/doc/build/orm/loading.rst +++ b/doc/build/orm/loading.rst @@ -1,3 +1,3 @@ :orphan: -Moved! :doc:`/orm/loading_relationships` \ No newline at end of file +Moved! :doc:`/orm/loading_relationships` diff --git a/doc/build/orm/loading_columns.rst b/doc/build/orm/loading_columns.rst index de10901e463..a50ac07b971 100644 --- a/doc/build/orm/loading_columns.rst +++ b/doc/build/orm/loading_columns.rst @@ -26,8 +26,9 @@ attribute is first referenced on the individual object instance:: from sqlalchemy.orm import deferred from sqlalchemy import Integer, String, Text, Binary, Column + class Book(Base): - __tablename__ = 'book' + __tablename__ = "book" book_id = Column(Integer, primary_key=True) title = Column(String(200), nullable=False) @@ -38,9 +39,9 @@ attribute is first referenced on the individual object instance:: Classical mappings as always place the usage of :func:`_orm.deferred` in the ``properties`` dictionary against the table-bound :class:`_schema.Column`:: - mapper_registry.map_imperatively(Book, book_table, properties={ - 'photo':deferred(book_table.c.photo) - }) + mapper_registry.map_imperatively( + Book, book_table, properties={"photo": deferred(book_table.c.photo)} + ) Deferred columns can be associated with a "group" name, so that they load together when any of them are first accessed. The example below defines a @@ -49,15 +50,15 @@ photos will be loaded in one SELECT statement. 
The ``.excerpt`` will be loaded separately when it is accessed:: class Book(Base): - __tablename__ = 'book' + __tablename__ = "book" book_id = Column(Integer, primary_key=True) title = Column(String(200), nullable=False) summary = Column(String(2000)) excerpt = deferred(Column(Text)) - photo1 = deferred(Column(Binary), group='photos') - photo2 = deferred(Column(Binary), group='photos') - photo3 = deferred(Column(Binary), group='photos') + photo1 = deferred(Column(Binary), group="photos") + photo2 = deferred(Column(Binary), group="photos") + photo3 = deferred(Column(Binary), group="photos") .. _deferred_options: @@ -73,7 +74,7 @@ basic query options are :func:`_orm.defer` and from sqlalchemy.orm import undefer query = session.query(Book) - query = query.options(defer('summary'), undefer('excerpt')) + query = query.options(defer("summary"), undefer("excerpt")) query.all() Above, the "summary" column will not load until accessed, and the "excerpt" @@ -85,7 +86,7 @@ using :func:`_orm.undefer_group`, sending in the group name:: from sqlalchemy.orm import undefer_group query = session.query(Book) - query.options(undefer_group('photos')).all() + query.options(undefer_group("photos")).all() .. 
_deferred_loading_w_multiple: @@ -117,8 +118,8 @@ those explicitly specified:: query = session.query(Author) query = query.options( - joinedload(Author.books).load_only(Book.summary, Book.excerpt), - ) + joinedload(Author.books).load_only(Book.summary, Book.excerpt), + ) Option structures as above can also be organized in more complex ways, such as hierarchically using the :meth:`_orm.Load.options` @@ -132,14 +133,13 @@ may be used:: query = session.query(Author) query = query.options( - joinedload(Author.book).options( - load_only(Book.summary, Book.excerpt), - joinedload(Book.citations).options( - joinedload(Citation.author), - defer(Citation.fulltext) - ) - ) - ) + joinedload(Author.book).options( + load_only(Book.summary, Book.excerpt), + joinedload(Book.citations).options( + joinedload(Citation.author), defer(Citation.fulltext) + ), + ) + ) .. versionadded:: 1.3.6 Added :meth:`_orm.Load.options` to allow easier construction of hierarchies of loader options. @@ -154,7 +154,7 @@ to create the same structure as we did above using :meth:`_orm.Load.options` as: query = query.options( joinedload(Author.book).load_only(Book.summary, Book.excerpt), defaultload(Author.book).joinedload(Book.citations).joinedload(Citation.author), - defaultload(Author.book).defaultload(Book.citations).defer(Citation.fulltext) + defaultload(Author.book).defaultload(Book.citations).defer(Citation.fulltext), ) .. seealso:: @@ -173,8 +173,7 @@ the "summary" and "excerpt" columns, we could say:: from sqlalchemy.orm import defer from sqlalchemy.orm import undefer - session.query(Book).options( - defer('*'), undefer("summary"), undefer("excerpt")) + session.query(Book).options(defer("*"), undefer("summary"), undefer("excerpt")) Above, the :func:`.defer` option is applied using a wildcard to all column attributes on the ``Book`` class. Then, the :func:`.undefer` option is used @@ -208,9 +207,7 @@ both at once. 
Using :class:`_orm.Load` looks like:: from sqlalchemy.orm import Load query = session.query(Book, Author).join(Book.author) - query = query.options( - Load(Book).load_only(Book.summary, Book.excerpt) - ) + query = query.options(Load(Book).load_only(Book.summary, Book.excerpt)) Above, :class:`_orm.Load` is used in conjunction with the exclusionary option :func:`.load_only` so that the deferral of all other columns only takes @@ -246,16 +243,15 @@ Deferred "raiseload" can be configured at the mapper level via class Book(Base): - __tablename__ = 'book' + __tablename__ = "book" book_id = Column(Integer, primary_key=True) title = Column(String(200), nullable=False) summary = deferred(Column(String(2000)), raiseload=True) excerpt = deferred(Column(Text), raiseload=True) - book_w_excerpt = session.query(Book).options(undefer(Book.excerpt)).first() - + book_w_excerpt = session.query(Book).options(undefer(Book.excerpt)).first() Column Deferral API ------------------- @@ -286,8 +282,8 @@ The bundle allows columns to be grouped together:: from sqlalchemy.orm import Bundle - bn = Bundle('mybundle', MyClass.data1, MyClass.data2) - for row in session.query(bn).filter(bn.c.data1 == 'd1'): + bn = Bundle("mybundle", MyClass.data1, MyClass.data2) + for row in session.query(bn).filter(bn.c.data1 == "d1"): print(row.mybundle.data1, row.mybundle.data2) The bundle can be subclassed to provide custom behaviors when results @@ -300,13 +296,14 @@ return structure with a straight Python dictionary:: from sqlalchemy.orm import Bundle + class DictBundle(Bundle): def create_row_processor(self, query, procs, labels): """Override create_row_processor to return values as dictionaries""" + def proc(row): - return dict( - zip(labels, (proc(row) for proc in procs)) - ) + return dict(zip(labels, (proc(row) for proc in procs))) + return proc .. 
note:: @@ -322,9 +319,9 @@ return structure with a straight Python dictionary:: A result from the above bundle will return dictionary values:: - bn = DictBundle('mybundle', MyClass.data1, MyClass.data2) - for row in session.query(bn).filter(bn.c.data1 == 'd1'): - print(row.mybundle['data1'], row.mybundle['data2']) + bn = DictBundle("mybundle", MyClass.data1, MyClass.data2) + for row in session.query(bn).filter(bn.c.data1 == "d1"): + print(row.mybundle["data1"], row.mybundle["data2"]) The :class:`.Bundle` construct is also integrated into the behavior of :func:`.composite`, where it is used to return composite attributes as objects diff --git a/doc/build/orm/loading_relationships.rst b/doc/build/orm/loading_relationships.rst index 5a1d5151d42..ad77f6e0de5 100644 --- a/doc/build/orm/loading_relationships.rst +++ b/doc/build/orm/loading_relationships.rst @@ -88,10 +88,10 @@ For example, to configure a relationship to use joined eager loading when the parent object is queried:: class Parent(Base): - __tablename__ = 'parent' + __tablename__ = "parent" id = Column(Integer, primary_key=True) - children = relationship("Child", lazy='joined') + children = relationship("Child", lazy="joined") Above, whenever a collection of ``Parent`` objects are loaded, each ``Parent`` will also have its ``children`` collection populated, using @@ -128,16 +128,16 @@ The loader options can also be "chained" using **method chaining** to specify how loading should occur further levels deep:: session.query(Parent).options( - joinedload(Parent.children). - subqueryload(Child.subelements)).all() + joinedload(Parent.children).subqueryload(Child.subelements) + ).all() Chained loader options can be applied against a "lazy" loaded collection. This means that when a collection or association is lazily loaded upon access, the specified option will then take effect:: session.query(Parent).options( - lazyload(Parent.children). 
- subqueryload(Child.subelements)).all() + lazyload(Parent.children).subqueryload(Child.subelements) + ).all() Above, the query will return ``Parent`` objects without the ``children`` collections loaded. When the ``children`` collection on a particular @@ -149,9 +149,7 @@ The above examples, using :class:`_orm.Query`, are now referred to as :term:`1.x style` queries. The options system is available as well for :term:`2.0 style` queries using the :meth:`_sql.Select.options` method:: - stmt = select(Parent).options( - lazyload(Parent.children). - subqueryload(Child.subelements)) + stmt = select(Parent).options(lazyload(Parent.children).subqueryload(Child.subelements)) result = session.execute(stmt) @@ -191,18 +189,14 @@ Using method chaining, the loader style of each link in the path is explicitly stated. To navigate along a path without changing the existing loader style of a particular attribute, the :func:`.defaultload` method/function may be used:: - session.query(A).options( - defaultload(A.atob). - joinedload(B.btoc)).all() + session.query(A).options(defaultload(A.atob).joinedload(B.btoc)).all() A similar approach can be used to specify multiple sub-options at once, using the :meth:`_orm.Load.options` method:: session.query(A).options( - defaultload(A.atob).options( - joinedload(B.btoc), - joinedload(B.btod) - )).all() + defaultload(A.atob).options(joinedload(B.btoc), joinedload(B.btod)) + ).all() .. versionadded:: 1.3.6 added :meth:`_orm.Load.options` @@ -219,8 +213,8 @@ the :meth:`_orm.Load.options` method:: memory. For example, given the previous example:: session.query(Parent).options( - lazyload(Parent.children). 
- subqueryload(Child.subelements)).all() + lazyload(Parent.children).subqueryload(Child.subelements) + ).all() if the ``children`` collection on a particular ``Parent`` object loaded by the above query is expired (such as when a :class:`.Session` object's @@ -235,8 +229,8 @@ the :meth:`_orm.Load.options` method:: # change the options on Parent objects that were already loaded session.query(Parent).populate_existing().options( - lazyload(Parent.children). - lazyload(Child.subelements)).all() + lazyload(Parent.children).lazyload(Child.subelements) + ).all() If the objects loaded above are fully cleared from the :class:`.Session`, such as due to garbage collection or that :meth:`.Session.expunge_all` @@ -310,6 +304,7 @@ replaces the behavior of lazy loading with an informative error being raised:: from sqlalchemy.orm import raiseload + session.query(User).options(raiseload(User.addresses)) Above, a ``User`` object loaded from the above query will not have @@ -320,8 +315,7 @@ access this attribute, an ORM exception is raised. indicate that all relationships should use this strategy. For example, to set up only one attribute as eager loading, and all the rest as raise:: - session.query(Order).options( - joinedload(Order.items), raiseload('*')) + session.query(Order).options(joinedload(Order.items), raiseload("*")) The above wildcard will apply to **all** relationships not just on ``Order`` besides ``items``, but all those on the ``Item`` objects as well. 
To set up @@ -330,14 +324,11 @@ path with :class:`_orm.Load`:: from sqlalchemy.orm import Load - session.query(Order).options( - joinedload(Order.items), Load(Order).raiseload('*')) + session.query(Order).options(joinedload(Order.items), Load(Order).raiseload("*")) Conversely, to set up the raise for just the ``Item`` objects:: - session.query(Order).options( - joinedload(Order.items).raiseload('*')) - + session.query(Order).options(joinedload(Order.items).raiseload("*")) The :func:`.raiseload` option applies only to relationship attributes. For column-oriented attributes, the :func:`.defer` option supports the @@ -382,9 +373,9 @@ using the :func:`_orm.joinedload` loader option: .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... options(joinedload(User.addresses)).\ - ... filter_by(name='jack').all() + >>> jack = ( + ... session.query(User).options(joinedload(User.addresses)).filter_by(name="jack").all() + ... ) {opensql}SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, @@ -409,13 +400,12 @@ at the mapping level via the :paramref:`_orm.relationship.innerjoin` flag:: class Address(Base): # ... - user_id = Column(ForeignKey('users.id'), nullable=False) + user_id = Column(ForeignKey("users.id"), nullable=False) user = relationship(User, lazy="joined", innerjoin=True) At the query option level, via the :paramref:`_orm.joinedload.innerjoin` flag:: - session.query(Address).options( - joinedload(Address.user, innerjoin=True)) + session.query(Address).options(joinedload(Address.user, innerjoin=True)) The JOIN will right-nest itself when applied in a chain that includes an OUTER JOIN: @@ -423,8 +413,8 @@ an OUTER JOIN: .. sourcecode:: python+sql >>> session.query(User).options( - ... joinedload(User.addresses). - ... joinedload(Address.widgets, innerjoin=True)).all() + ... joinedload(User.addresses).joinedload(Address.widgets, innerjoin=True) + ... 
).all() {opensql}SELECT widgets_1.id AS widgets_1_id, widgets_1.name AS widgets_1_name, @@ -519,10 +509,13 @@ named in the query: .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... options(joinedload(User.addresses)).\ - ... filter(User.name=='jack').\ - ... order_by(Address.email_address).all() + >>> jack = ( + ... session.query(User) + ... .options(joinedload(User.addresses)) + ... .filter(User.name == "jack") + ... .order_by(Address.email_address) + ... .all() + ... ) {opensql}SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, @@ -544,10 +537,13 @@ address is to use :meth:`_query.Query.join`: .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... join(User.addresses).\ - ... filter(User.name=='jack').\ - ... order_by(Address.email_address).all() + >>> jack = ( + ... session.query(User) + ... .join(User.addresses) + ... .filter(User.name == "jack") + ... .order_by(Address.email_address) + ... .all() + ... ) {opensql} SELECT users.id AS users_id, @@ -568,11 +564,14 @@ are ordering on, the other is used anonymously to load the contents of the .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... join(User.addresses).\ - ... options(joinedload(User.addresses)).\ - ... filter(User.name=='jack').\ - ... order_by(Address.email_address).all() + >>> jack = ( + ... session.query(User) + ... .join(User.addresses) + ... .options(joinedload(User.addresses)) + ... .filter(User.name == "jack") + ... .order_by(Address.email_address) + ... .all() + ... ) {opensql}SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, @@ -600,12 +599,14 @@ to see why :func:`joinedload` does what it does, consider if we were .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... join(User.addresses).\ - ... options(joinedload(User.addresses)).\ - ... filter(User.name=='jack').\ - ... filter(Address.email_address=='someaddress@foo.com').\ - ... 
all() + >>> jack = ( + ... session.query(User) + ... .join(User.addresses) + ... .options(joinedload(User.addresses)) + ... .filter(User.name == "jack") + ... .filter(Address.email_address == "someaddress@foo.com") + ... .all() + ... ) {opensql}SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address, @@ -634,12 +635,14 @@ into :func:`.subqueryload`: .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... join(User.addresses).\ - ... options(subqueryload(User.addresses)).\ - ... filter(User.name=='jack').\ - ... filter(Address.email_address=='someaddress@foo.com').\ - ... all() + >>> jack = ( + ... session.query(User) + ... .join(User.addresses) + ... .options(subqueryload(User.addresses)) + ... .filter(User.name == "jack") + ... .filter(Address.email_address == "someaddress@foo.com") + ... .all() + ... ) {opensql}SELECT users.id AS users_id, users.name AS users_name, @@ -688,9 +691,12 @@ the collection members to load them at once: .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... options(subqueryload(User.addresses)).\ - ... filter_by(name='jack').all() + >>> jack = ( + ... session.query(User) + ... .options(subqueryload(User.addresses)) + ... .filter_by(name="jack") + ... .all() + ... ) {opensql}SELECT users.id AS users_id, users.name AS users_name, @@ -752,18 +758,15 @@ the same ordering as used by the parent query. 
Without it, there is a chance that the inner query could return the wrong rows:: # incorrect, no ORDER BY - session.query(User).options( - subqueryload(User.addresses)).first() + session.query(User).options(subqueryload(User.addresses)).first() # incorrect if User.name is not unique - session.query(User).options( - subqueryload(User.addresses) - ).order_by(User.name).first() + session.query(User).options(subqueryload(User.addresses)).order_by(User.name).first() # correct - session.query(User).options( - subqueryload(User.addresses) - ).order_by(User.name, User.id).first() + session.query(User).options(subqueryload(User.addresses)).order_by( + User.name, User.id + ).first() .. seealso:: @@ -793,9 +796,12 @@ order to load related associations: .. sourcecode:: python+sql - >>> jack = session.query(User).\ - ... options(selectinload(User.addresses)).\ - ... filter(or_(User.name == 'jack', User.name == 'ed')).all() + >>> jack = ( + ... session.query(User) + ... .options(selectinload(User.addresses)) + ... .filter(or_(User.name == "jack", User.name == "ed")) + ... .all() + ... ) {opensql}SELECT users.id AS users_id, users.name AS users_name, @@ -829,8 +835,7 @@ value from the parent object is used: .. sourcecode:: python+sql - >>> session.query(Address).\ - ... options(selectinload(Address.user)).all() + >>> session.query(Address).options(selectinload(Address.user)).all() {opensql}SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, @@ -1012,7 +1017,7 @@ attributes not otherwise specified in the :class:`_query.Query`. This feature is available by passing the string ``'*'`` as the argument to any of these options:: - session.query(MyClass).options(lazyload('*')) + session.query(MyClass).options(lazyload("*")) Above, the ``lazyload('*')`` option will supersede the ``lazy`` setting of all :func:`_orm.relationship` constructs in use for that query, @@ -1028,10 +1033,7 @@ query, such as :func:`.eagerload`, :func:`.subqueryload`, etc. 
The query below will still use joined loading for the ``widget`` relationship:: - session.query(MyClass).options( - lazyload('*'), - joinedload(MyClass.widget) - ) + session.query(MyClass).options(lazyload("*"), joinedload(MyClass.widget)) If multiple ``'*'`` options are passed, the last one overrides those previously passed. @@ -1045,8 +1047,7 @@ we can instruct all relationships on ``Address`` only to use lazy loading by first applying the :class:`_orm.Load` object, then specifying the ``*`` as a chained option:: - session.query(User, Address).options( - Load(Address).lazyload('*')) + session.query(User, Address).options(Load(Address).lazyload("*")) Above, all relationships on ``Address`` will be set to a lazy load. @@ -1073,18 +1074,18 @@ explicitly. Below, we specify a join between ``User`` and ``Address`` and additionally establish this as the basis for eager loading of ``User.addresses``:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) addresses = relationship("Address") + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" # ... 
- q = session.query(User).join(User.addresses).\ - options(contains_eager(User.addresses)) + q = session.query(User).join(User.addresses).options(contains_eager(User.addresses)) If the "eager" portion of the statement is "aliased", the path should be specified using :meth:`.PropComparator.of_type`, which allows @@ -1096,9 +1097,11 @@ the specific :func:`_orm.aliased` construct to be passed: adalias = aliased(Address) # construct a Query object which expects the "addresses" results - query = session.query(User).\ - outerjoin(User.addresses.of_type(adalias)).\ - options(contains_eager(User.addresses.of_type(adalias))) + query = ( + session.query(User) + .outerjoin(User.addresses.of_type(adalias)) + .options(contains_eager(User.addresses.of_type(adalias))) + ) # get results normally r = query.all() @@ -1117,9 +1120,7 @@ The path given as the argument to :func:`.contains_eager` needs to be a full path from the starting entity. For example if we were loading ``Users->orders->Order->items->Item``, the option would be used as:: - query(User).options( - contains_eager(User.orders). 
- contains_eager(Order.items)) + query(User).options(contains_eager(User.orders).contains_eager(Order.items)) Using contains_eager() to load a custom-filtered collection result ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -1136,11 +1137,13 @@ routing it using :func:`_orm.contains_eager`, also using :meth:`_query.Query.populate_existing` to ensure any already-loaded collections are overwritten:: - q = session.query(User).\ - join(User.addresses).\ - filter(Address.email_address.like('%@aol.com')).\ - options(contains_eager(User.addresses)).\ - populate_existing() + q = ( + session.query(User) + .join(User.addresses) + .filter(Address.email_address.like("%@aol.com")) + .options(contains_eager(User.addresses)) + .populate_existing() + ) The above query will load only ``User`` objects which contain at least ``Address`` object that contains the substring ``'aol.com'`` in its @@ -1204,20 +1207,16 @@ Given the following mapping:: class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) - b_id = Column(ForeignKey('b.id')) - b = relationship( - "B", - backref=backref("a", uselist=False), - lazy='joined') + b_id = Column(ForeignKey("b.id")) + b = relationship("B", backref=backref("a", uselist=False), lazy="joined") class B(Base): - __tablename__ = 'b' + __tablename__ = "b" id = Column(Integer, primary_key=True) - If we query for an ``A`` row, and then ask it for ``a.b.a``, we will get an extra SELECT:: @@ -1232,10 +1231,11 @@ can create an on-load rule to populate this for us:: from sqlalchemy import event from sqlalchemy.orm import attributes + @event.listens_for(A, "load") def load_b(target, context): - if 'b' in target.__dict__: - attributes.set_committed_value(target.b, 'a', target) + if "b" in target.__dict__: + attributes.set_committed_value(target.b, "a", target) Now when we query for ``A``, we will get ``A.b`` from the joined eager load, and ``A.b.a`` from our event: @@ -1253,7 +1253,6 @@ and ``A.b.a`` 
from our event: (1, 0) {stop}assert a1.b.a is a1 - Relationship Loader API ----------------------- diff --git a/doc/build/orm/mapped_attributes.rst b/doc/build/orm/mapped_attributes.rst index a4fd3115d5d..5ee7d6498ff 100644 --- a/doc/build/orm/mapped_attributes.rst +++ b/doc/build/orm/mapped_attributes.rst @@ -19,15 +19,16 @@ issued when the ORM is populating the object:: from sqlalchemy.orm import validates + class EmailAddress(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) email = Column(String) - @validates('email') + @validates("email") def validate_email(self, key, address): - if '@' not in address: + if "@" not in address: raise ValueError("failed simple email validation") return address @@ -42,18 +43,18 @@ collection:: from sqlalchemy.orm import validates + class User(Base): # ... addresses = relationship("Address") - @validates('addresses') + @validates("addresses") def validate_address(self, key, address): - if '@' not in address.email: + if "@" not in address.email: raise ValueError("failed simplified email validation") return address - The validation function by default does not get emitted for collection remove events, as the typical expectation is that a value being discarded doesn't require validation. However, :func:`.validates` supports reception @@ -63,18 +64,18 @@ argument which if ``True`` indicates that the operation is a removal:: from sqlalchemy.orm import validates + class User(Base): # ... 
addresses = relationship("Address") - @validates('addresses', include_removes=True) + @validates("addresses", include_removes=True) def validate_address(self, key, address, is_remove): if is_remove: - raise ValueError( - "not allowed to remove items from the collection") + raise ValueError("not allowed to remove items from the collection") else: - if '@' not in address.email: + if "@" not in address.email: raise ValueError("failed simplified email validation") return address @@ -85,14 +86,15 @@ event occurs as a result of a backref:: from sqlalchemy.orm import validates + class User(Base): # ... - addresses = relationship("Address", backref='user') + addresses = relationship("Address", backref="user") - @validates('addresses', include_backrefs=False) + @validates("addresses", include_backrefs=False) def validate_address(self, key, address): - if '@' not in address: + if "@" not in address: raise ValueError("failed simplified email validation") return address @@ -131,7 +133,7 @@ plain descriptor, and to have it read/write from a mapped attribute with a different name. Below we illustrate this using Python 2.6-style properties:: class EmailAddress(Base): - __tablename__ = 'email_address' + __tablename__ = "email_address" id = Column(Integer, primary_key=True) @@ -158,8 +160,9 @@ usable with :class:`_query.Query`. To provide these, we instead use the from sqlalchemy.ext.hybrid import hybrid_property + class EmailAddress(Base): - __tablename__ = 'email_address' + __tablename__ = "email_address" id = Column(Integer, primary_key=True) @@ -180,6 +183,7 @@ that is, from the ``EmailAddress`` class directly: .. sourcecode:: python+sql from sqlalchemy.orm import Session + session = Session() {sql}address = session.query(EmailAddress).\ @@ -189,14 +193,12 @@ that is, from the ``EmailAddress`` class directly: FROM address WHERE address.email = ? 
('address@example.com',) - {stop} - address.email = 'otheraddress@example.com' + address.email = "otheraddress@example.com" {sql}session.commit() UPDATE address SET email=? WHERE address.id = ? ('otheraddress@example.com', 1) COMMIT - {stop} The :class:`~.hybrid_property` also allows us to change the behavior of the attribute, including defining separate behaviors when the attribute is @@ -206,7 +208,7 @@ host name automatically, we might define two sets of string manipulation logic:: class EmailAddress(Base): - __tablename__ = 'email_address' + __tablename__ = "email_address" id = Column(Integer, primary_key=True) @@ -245,7 +247,6 @@ attribute, a SQL function is rendered which produces the same effect: FROM address WHERE substr(address.email, ?, length(address.email) - ?) = ? (0, 12, 'address') - {stop} Read more about Hybrids at :ref:`hybrids_toplevel`. @@ -261,9 +262,10 @@ In the most basic sense, the synonym is an easy way to make a certain attribute available by an additional name:: from sqlalchemy.orm import synonym - + + class MyClass(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) job_status = Column(String(50)) @@ -274,19 +276,19 @@ The above class ``MyClass`` has two attributes, ``.job_status`` and ``.status`` that will behave as one attribute, both at the expression level:: - >>> print(MyClass.job_status == 'some_status') + >>> print(MyClass.job_status == "some_status") my_table.job_status = :job_status_1 - >>> print(MyClass.status == 'some_status') + >>> print(MyClass.status == "some_status") my_table.job_status = :job_status_1 and at the instance level:: - >>> m1 = MyClass(status='x') + >>> m1 = MyClass(status="x") >>> m1.status, m1.job_status ('x', 'x') - >>> m1.job_status = 'y' + >>> m1.job_status = "y" >>> m1.status, m1.job_status ('y', 'y') @@ -299,7 +301,7 @@ a user-defined :term:`descriptor`. 
We can supply our ``status`` synonym with a ``@property``:: class MyClass(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) status = Column(String(50)) @@ -315,8 +317,9 @@ using the :func:`.synonym_for` decorator:: from sqlalchemy.ext.declarative import synonym_for + class MyClass(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) status = Column(String(50)) diff --git a/doc/build/orm/mapped_sql_expr.rst b/doc/build/orm/mapped_sql_expr.rst index eefd1d5d685..47af9b22c4c 100644 --- a/doc/build/orm/mapped_sql_expr.rst +++ b/doc/build/orm/mapped_sql_expr.rst @@ -21,8 +21,9 @@ will provide for us the ``fullname``, which is the string concatenation of the t from sqlalchemy.ext.hybrid import hybrid_property + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) @@ -51,8 +52,9 @@ needs to be present inside the hybrid, using the ``if`` statement in Python and from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.sql import case + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) @@ -66,9 +68,12 @@ needs to be present inside the hybrid, using the ``if`` statement in Python and @fullname.expression def fullname(cls): - return case([ - (cls.firstname != None, cls.firstname + " " + cls.lastname), - ], else_ = cls.lastname) + return case( + [ + (cls.firstname != None, cls.firstname + " " + cls.lastname), + ], + else_=cls.lastname, + ) .. 
_mapper_column_property_sql_expressions: @@ -95,8 +100,9 @@ follows:: from sqlalchemy.orm import column_property + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) @@ -115,28 +121,30 @@ of ``Address`` objects available for a particular ``User``:: Base = declarative_base() + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) - user_id = Column(Integer, ForeignKey('user.id')) + user_id = Column(Integer, ForeignKey("user.id")) + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) address_count = column_property( - select(func.count(Address.id)). - where(Address.user_id==id). - correlate_except(Address). - scalar_subquery() + select(func.count(Address.id)) + .where(Address.user_id == id) + .correlate_except(Address) + .scalar_subquery() ) In the above example, we define a :func:`_expression.ScalarSelect` construct like the following:: stmt = ( - select(func.count(Address.id)). - where(Address.user_id==id). - correlate_except(Address). - scalar_subquery() + select(func.count(Address.id)) + .where(Address.user_id == id) + .correlate_except(Address) + .scalar_subquery() ) Above, we first use :func:`_sql.select` to create a :class:`_sql.Select` @@ -166,9 +174,7 @@ to add an additional property after the fact:: # only works if a declarative base class is in use User.address_count = column_property( - select(func.count(Address.id)). - where(Address.user_id==User.id). - scalar_subquery() + select(func.count(Address.id)).where(Address.user_id == User.id).scalar_subquery() ) When using mapping styles that don't use :func:`_orm.declarative_base`, @@ -180,9 +186,10 @@ which can be obtained using :func:`_sa.inspect`:: reg = registry() + @reg.mapped class User: - __tablename__ = 'user' + __tablename__ = "user" # ... 
additional mapping directives @@ -191,11 +198,12 @@ which can be obtained using :func:`_sa.inspect`:: # works for any kind of mapping from sqlalchemy import inspect + inspect(User).add_property( column_property( - select(func.count(Address.id)). - where(Address.user_id==User.id). - scalar_subquery() + select(func.count(Address.id)) + .where(Address.user_id == User.id) + .scalar_subquery() ) ) @@ -205,17 +213,19 @@ association table to both tables in a relationship:: from sqlalchemy import and_ + class Author(Base): # ... book_count = column_property( - select(func.count(books.c.id) - ).where( + select(func.count(books.c.id)) + .where( and_( - book_authors.c.author_id==authors.c.id, - book_authors.c.book_id==books.c.id + book_authors.c.author_id == authors.c.id, + book_authors.c.book_id == books.c.id, ) - ).scalar_subquery() + ) + .scalar_subquery() ) .. _mapper_column_property_sql_expressions_composed: @@ -238,21 +248,20 @@ attribute, which is itself a :class:`.ColumnProperty`:: class File(Base): - __tablename__ = 'file' + __tablename__ = "file" id = Column(Integer, primary_key=True) name = Column(String(64)) extension = Column(String(8)) - filename = column_property(name + '.' + extension) - path = column_property('C:/' + filename.expression) + filename = column_property(name + "." + extension) + path = column_property("C:/" + filename.expression) When the ``File`` class is used in expressions normally, the attributes assigned to ``filename`` and ``path`` are usable directly. 
The use of the :attr:`.ColumnProperty.expression` attribute is only necessary when using the :class:`.ColumnProperty` directly within the mapping definition:: - q = session.query(File.path).filter(File.filename == 'foo.txt') - + q = session.query(File.path).filter(File.filename == "foo.txt") Using a plain descriptor ------------------------ @@ -269,19 +278,18 @@ which is then used to emit a query:: from sqlalchemy.orm import object_session from sqlalchemy import select, func + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) @property def address_count(self): - return object_session(self).\ - scalar( - select(func.count(Address.id)).\ - where(Address.user_id==self.id) - ) + return object_session(self).scalar( + select(func.count(Address.id)).where(Address.user_id == self.id) + ) The plain descriptor approach is useful as a last resort, but is less performant in the usual case than both the hybrid and column property approaches, in that @@ -310,8 +318,9 @@ may be applied:: from sqlalchemy.orm import query_expression + class A(Base): - __tablename__ = 'a' + __tablename__ = "a" id = Column(Integer, primary_key=True) x = Column(Integer) y = Column(Integer) @@ -322,8 +331,8 @@ We can then query for objects of type ``A``, applying an arbitrary SQL expression to be populated into ``A.expr``:: from sqlalchemy.orm import with_expression - q = session.query(A).options( - with_expression(A.expr, A.x + A.y)) + + q = session.query(A).options(with_expression(A.expr, A.x + A.y)) The :func:`.query_expression` mapping has these caveats: @@ -341,8 +350,12 @@ The :func:`.query_expression` mapping has these caveats: To ensure the attribute is re-loaded, use :meth:`_orm.Query.populate_existing`:: - obj = session.query(A).populate_existing().options( - with_expression(A.expr, some_expr)).first() + obj = ( + session.query(A) + .populate_existing() + 
.options(with_expression(A.expr, some_expr)) + .first() + ) * The query_expression value **does not refresh when the object is expired**. Once the object is expired, either via :meth:`.Session.expire` @@ -357,18 +370,24 @@ The :func:`.query_expression` mapping has these caveats: ad-hoc expression; that is, this won't work:: # wont work - q = session.query(A).options( - with_expression(A.expr, A.x + A.y) - ).filter(A.expr > 5).order_by(A.expr) + q = ( + session.query(A) + .options(with_expression(A.expr, A.x + A.y)) + .filter(A.expr > 5) + .order_by(A.expr) + ) The ``A.expr`` expression will resolve to NULL in the above WHERE clause and ORDER BY clause. To use the expression throughout the query, assign to a variable and use that:: a_expr = A.x + A.y - q = session.query(A).options( - with_expression(A.expr, a_expr) - ).filter(a_expr > 5).order_by(a_expr) + q = ( + session.query(A) + .options(with_expression(A.expr, a_expr)) + .filter(a_expr > 5) + .order_by(a_expr) + ) .. versionadded:: 1.2 diff --git a/doc/build/orm/mapping_columns.rst b/doc/build/orm/mapping_columns.rst index 788d5776ef9..1ec8636b6d2 100644 --- a/doc/build/orm/mapping_columns.rst +++ b/doc/build/orm/mapping_columns.rst @@ -46,9 +46,9 @@ The name assigned to the Python attribute which maps to it that way, as we illustrate here in a Declarative mapping:: class User(Base): - __tablename__ = 'user' - id = Column('user_id', Integer, primary_key=True) - name = Column('user_name', String(50)) + __tablename__ = "user" + id = Column("user_id", Integer, primary_key=True) + name = Column("user_name", String(50)) Where above ``User.id`` resolves to a column named ``user_id`` and ``User.name`` resolves to a column named ``user_name``. 
@@ -65,11 +65,14 @@ The corresponding technique for an :term:`imperative` mapping is to place the desired key in the :paramref:`_orm.mapper.properties` dictionary with the desired key:: - mapper_registry.map_imperatively(User, user_table, properties={ - 'id': user_table.c.user_id, - 'name': user_table.c.user_name, - }) - + mapper_registry.map_imperatively( + User, + user_table, + properties={ + "id": user_table.c.user_id, + "name": user_table.c.user_name, + }, + ) .. _mapper_automated_reflection_schemes: @@ -91,14 +94,13 @@ instance:: @event.listens_for(Base.metadata, "column_reflect") def column_reflect(inspector, table, column_info): # set column.key = "attr_" - column_info['key'] = "attr_%s" % column_info['name'].lower() + column_info["key"] = "attr_%s" % column_info["name"].lower() With the above event, the reflection of :class:`_schema.Column` objects will be intercepted with our event that adds a new ".key" element, such as in a mapping as below:: class MyClass(Base): - __table__ = Table("some_table", Base.metadata, - autoload_with=some_engine) + __table__ = Table("some_table", Base.metadata, autoload_with=some_engine) The approach also works with both the :class:`.DeferredReflection` base class as well as with the :ref:`automap_toplevel` extension. 
For automap @@ -131,8 +133,9 @@ result in the former value being loaded first:: from sqlalchemy.orm import column_property + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = column_property(Column(String(50)), active_history=True) @@ -156,7 +159,7 @@ that is the string concatenation of the ``firstname`` and ``lastname`` columns:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) firstname = Column(String(50)) lastname = Column(String(50)) @@ -211,7 +214,7 @@ map such a table as in the following example:: metadata, Column("user_id", String(40), nullable=False), Column("group_id", String(40), nullable=False), - UniqueConstraint("user_id", "group_id") + UniqueConstraint("user_id", "group_id"), ) @@ -220,9 +223,7 @@ map such a table as in the following example:: class GroupUsers(Base): __table__ = group_users - __mapper_args__ = { - "primary_key": [group_users.c.user_id, group_users.c.group_id] - } + __mapper_args__ = {"primary_key": [group_users.c.user_id, group_users.c.group_id]} Above, the ``group_users`` table is an association table of some kind with string columns ``user_id`` and ``group_id``, but no primary key is set up; @@ -250,9 +251,7 @@ For example:: class User(Base): __table__ = user_table - __mapper_args__ = { - 'include_properties' :['user_id', 'user_name'] - } + __mapper_args__ = {"include_properties": ["user_id", "user_name"]} ...will map the ``User`` class to the ``user_table`` table, only including the ``user_id`` and ``user_name`` columns - the rest are not referenced. 
@@ -260,9 +259,7 @@ Similarly:: class Address(Base): __table__ = address_table - __mapper_args__ = { - 'exclude_properties' : ['street', 'city', 'state', 'zip'] - } + __mapper_args__ = {"exclude_properties": ["street", "city", "state", "zip"]} ...will map the ``Address`` class to the ``address_table`` table, including all columns present except ``street``, ``city``, ``state``, and ``zip``. @@ -282,8 +279,8 @@ should be included or excluded:: class UserAddress(Base): __table__ = user_table.join(addresses_table) __mapper_args__ = { - 'exclude_properties' :[address_table.c.id], - 'primary_key' : [user_table.c.id] + "exclude_properties": [address_table.c.id], + "primary_key": [user_table.c.id], } .. note:: diff --git a/doc/build/orm/mapping_styles.rst b/doc/build/orm/mapping_styles.rst index 84db8cb0870..edec17c14bf 100644 --- a/doc/build/orm/mapping_styles.rst +++ b/doc/build/orm/mapping_styles.rst @@ -72,7 +72,7 @@ used in a declarative table mapping:: # an example mapping using the base class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String) @@ -149,33 +149,40 @@ the :meth:`_orm.registry.map_imperatively` method:: mapper_registry = registry() user_table = Table( - 'user', + "user", mapper_registry.metadata, - Column('id', Integer, primary_key=True), - Column('name', String(50)), - Column('fullname', String(50)), - Column('nickname', String(12)) + Column("id", Integer, primary_key=True), + Column("name", String(50)), + Column("fullname", String(50)), + Column("nickname", String(12)), ) + class User: pass - mapper_registry.map_imperatively(User, user_table) + mapper_registry.map_imperatively(User, user_table) Information about mapped attributes, such as relationships to other classes, are provided via the ``properties`` dictionary. 
The example below illustrates a second :class:`_schema.Table` object, mapped to a class called ``Address``, then linked to ``User`` via :func:`_orm.relationship`:: - address = Table('address', metadata_obj, - Column('id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey('user.id')), - Column('email_address', String(50)) - ) + address = Table( + "address", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.id")), + Column("email_address", String(50)), + ) - mapper_registry.map_imperatively(User, user, properties={ - 'addresses' : relationship(Address, backref='user', order_by=address.c.id) - }) + mapper_registry.map_imperatively( + User, + user, + properties={ + "addresses": relationship(Address, backref="user", order_by=address.c.id) + }, + ) mapper_registry.map_imperatively(Address, address) @@ -314,8 +321,9 @@ all the attributes that are named. E.g.:: Base = declarative_base() + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(...) name = Column(...) @@ -324,7 +332,7 @@ all the attributes that are named. 
E.g.:: An object of type ``User`` above will have a constructor which allows ``User`` objects to be created as:: - u1 = User(name='some name', fullname='some fullname') + u1 = User(name="some name", fullname="some fullname") The above constructor may be customized by passing a Python callable to the :paramref:`_orm.registry.constructor` parameter which provides the @@ -337,15 +345,17 @@ The constructor also applies to imperative mappings:: mapper_registry = registry() user_table = Table( - 'user', + "user", mapper_registry.metadata, - Column('id', Integer, primary_key=True), - Column('name', String(50)) + Column("id", Integer, primary_key=True), + Column("name", String(50)), ) + class User: pass + mapper_registry.map_imperatively(User, user_table) The above class, mapped imperatively as described at :ref:`orm_imperative_mapping`, @@ -505,7 +515,7 @@ as well as specific history on modifications to attributes since the last flush: >>> insp.attrs.nickname.value 'nickname' - >>> u1.nickname = 'new nickname' + >>> u1.nickname = "new nickname" >>> insp.attrs.nickname.history History(added=['new nickname'], unchanged=(), deleted=['nickname']) diff --git a/doc/build/orm/nonstandard_mappings.rst b/doc/build/orm/nonstandard_mappings.rst index bf6b0f247d2..4bd2546e096 100644 --- a/doc/build/orm/nonstandard_mappings.rst +++ b/doc/build/orm/nonstandard_mappings.rst @@ -15,24 +15,27 @@ function creates a selectable unit comprised of multiple tables, complete with its own composite primary key, which can be mapped in the same way as a :class:`_schema.Table`:: - from sqlalchemy import Table, Column, Integer, \ - String, MetaData, join, ForeignKey + from sqlalchemy import Table, Column, Integer, String, MetaData, join, ForeignKey from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import column_property metadata_obj = MetaData() # define two Table objects - user_table = Table('user', metadata_obj, - Column('id', Integer, primary_key=True), - Column('name', 
String), - ) - - address_table = Table('address', metadata_obj, - Column('id', Integer, primary_key=True), - Column('user_id', Integer, ForeignKey('user.id')), - Column('email_address', String) - ) + user_table = Table( + "user", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("name", String), + ) + + address_table = Table( + "address", + metadata_obj, + Column("id", Integer, primary_key=True), + Column("user_id", Integer, ForeignKey("user.id")), + Column("email_address", String), + ) # define a join between them. This # takes place across the user.id and address.user_id @@ -104,9 +107,10 @@ may be used:: from sqlalchemy import event - @event.listens_for(PtoQ, 'before_update') + + @event.listens_for(PtoQ, "before_update") def receive_before_update(mapper, connection, target): - if target.some_required_attr_on_q is None: + if target.some_required_attr_on_q is None: connection.execute(q_table.insert(), {"id": target.id}) where above, a row is INSERTed into the ``q_table`` table by creating an @@ -128,15 +132,22 @@ includes a join to a subquery:: from sqlalchemy import select, func - subq = select( - func.count(orders.c.id).label('order_count'), - func.max(orders.c.price).label('highest_order'), - orders.c.customer_id - ).group_by(orders.c.customer_id).subquery() + subq = ( + select( + func.count(orders.c.id).label("order_count"), + func.max(orders.c.price).label("highest_order"), + orders.c.customer_id, + ) + .group_by(orders.c.customer_id) + .subquery() + ) + + customer_select = ( + select(customers, subq) + .join_from(customers, subq, customers.c.id == subq.c.customer_id) + .subquery() + ) - customer_select = select(customers, subq).join_from( - customers, subq, customers.c.id == subq.c.customer_id - ).subquery() class Customer(Base): __table__ = customer_select diff --git a/doc/build/orm/persistence_techniques.rst b/doc/build/orm/persistence_techniques.rst index 112ac5a319f..09d1948e882 100644 --- a/doc/build/orm/persistence_techniques.rst +++ 
b/doc/build/orm/persistence_techniques.rst @@ -21,6 +21,7 @@ an attribute:: value = Column(Integer) + someobject = session.query(SomeClass).get(5) # set 'value' attribute to a SQL expression adding one @@ -89,10 +90,10 @@ This is most easily accomplished using the session = Session() # execute a string statement - result = session.execute("select * from table where id=:id", {'id':7}) + result = session.execute("select * from table where id=:id", {"id": 7}) # execute a SQL expression construct - result = session.execute(select(mytable).where(mytable.c.id==7)) + result = session.execute(select(mytable).where(mytable.c.id == 7)) The current :class:`~sqlalchemy.engine.Connection` held by the :class:`~sqlalchemy.orm.session.Session` is accessible using the @@ -118,13 +119,12 @@ proper context for the desired engine:: # need to specify mapper or class when executing result = session.execute( text("select * from table where id=:id"), - {'id':7}, - bind_arguments={'mapper': MyMappedClass} + {"id": 7}, + bind_arguments={"mapper": MyMappedClass}, ) result = session.execute( - select(mytable).where(mytable.c.id==7), - bind_arguments={'mapper': MyMappedClass} + select(mytable).where(mytable.c.id == 7), bind_arguments={"mapper": MyMappedClass} ) connection = session.connection(MyMappedClass) @@ -144,14 +144,15 @@ The ORM considers any attribute that was never set on an object as a "default" case; the attribute will be omitted from the INSERT statement:: class MyObject(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) data = Column(String(50), nullable=True) + obj = MyObject(id=1) session.add(obj) session.commit() # INSERT with the 'data' column omitted; the database - # itself will persist this as the NULL value + # itself will persist this as the NULL value Omitting a column from the INSERT means that the column will have the NULL value set, *unless* the column has a default set up, @@ -161,29 +162,31 @@ behavior of 
SQLAlchemy's insert behavior with both client-side and server-side defaults:: class MyObject(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) data = Column(String(50), nullable=True, server_default="default") + obj = MyObject(id=1) session.add(obj) session.commit() # INSERT with the 'data' column omitted; the database - # itself will persist this as the value 'default' + # itself will persist this as the value 'default' However, in the ORM, even if one assigns the Python value ``None`` explicitly to the object, this is treated the **same** as though the value were never assigned:: class MyObject(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) data = Column(String(50), nullable=True, server_default="default") + obj = MyObject(id=1, data=None) session.add(obj) session.commit() # INSERT with the 'data' column explicitly set to None; - # the ORM still omits it from the statement and the - # database will still persist this as the value 'default' + # the ORM still omits it from the statement and the + # database will still persist this as the value 'default' The above operation will persist into the ``data`` column the server default value of ``"default"`` and not SQL NULL, even though ``None`` @@ -200,9 +203,9 @@ on a per-instance level, we assign the attribute using the obj = MyObject(id=1, data=null()) session.add(obj) session.commit() # INSERT with the 'data' column explicitly set as null(); - # the ORM uses this directly, bypassing all client- - # and server-side defaults, and the database will - # persist this as the NULL value + # the ORM uses this directly, bypassing all client- + # and server-side defaults, and the database will + # persist this as the NULL value The :obj:`_expression.null` SQL construct always translates into the SQL NULL value being directly present in the target INSERT statement. 
@@ -215,18 +218,21 @@ a type where the ORM should treat the value ``None`` the same as any other value and pass it through, rather than omitting it as a "missing" value:: class MyObject(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) data = Column( - String(50).evaluates_none(), # indicate that None should always be passed - nullable=True, server_default="default") + String(50).evaluates_none(), # indicate that None should always be passed + nullable=True, + server_default="default", + ) + obj = MyObject(id=1, data=None) session.add(obj) session.commit() # INSERT with the 'data' column explicitly set to None; - # the ORM uses this directly, bypassing all client- - # and server-side defaults, and the database will - # persist this as the NULL value + # the ORM uses this directly, bypassing all client- + # and server-side defaults, and the database will + # persist this as the NULL value .. topic:: Evaluating None @@ -286,7 +292,7 @@ columns should be fetched immediately upon INSERT and sometimes UPDATE:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) timestamp = Column(DateTime(), server_default=func.now()) @@ -315,7 +321,7 @@ This case is the same as case 1 above, except we don't specify :paramref:`.orm.mapper.eager_defaults`:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) timestamp = Column(DateTime(), server_default=func.now()) @@ -366,7 +372,7 @@ For an explicit sequence as we use with Oracle, this just means we are using the :class:`.Sequence` construct:: class MyOracleModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, Sequence("my_sequence"), primary_key=True) data = Column(String(50)) @@ -385,7 +391,7 @@ by a trigger, we use :class:`.FetchedValue`. 
Below is a model that uses a SQL Server TIMESTAMP column as the primary key, which generates values automatically:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" timestamp = Column(TIMESTAMP(), server_default=FetchedValue(), primary_key=True) @@ -419,7 +425,7 @@ Using the example of a :class:`.DateTime` column for MySQL, we add an explicit pre-execute-supported default using the "NOW()" SQL function:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" timestamp = Column(DateTime(), default=func.now(), primary_key=True) @@ -445,13 +451,11 @@ into the column:: from sqlalchemy import cast, Binary + class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" - timestamp = Column( - TIMESTAMP(), - default=cast(func.now(), Binary), - primary_key=True) + timestamp = Column(TIMESTAMP(), default=cast(func.now(), Binary), primary_key=True) Above, in addition to selecting the "NOW()" function, we additionally make use of the :class:`.Binary` datatype in conjunction with :func:`.cast` so that @@ -478,12 +482,13 @@ We therefore must also specify that we'd like to coerce the return value to by passing this as the ``type_`` parameter:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" timestamp = Column( DateTime, - default=func.datetime('now', 'localtime', type_=DateTime), - primary_key=True) + default=func.datetime("now", "localtime", type_=DateTime), + primary_key=True, + ) The above mapping upon INSERT will look like: @@ -533,12 +538,17 @@ values using RETURNING when available, :paramref:`_schema.Column.server_default` to ensure that the fetch occurs:: class MyModel(Base): - __tablename__ = 'my_table' + __tablename__ = "my_table" id = Column(Integer, primary_key=True) created = Column(DateTime(), default=func.now(), server_default=FetchedValue()) - updated = Column(DateTime(), onupdate=func.now(), server_default=FetchedValue(), server_onupdate=FetchedValue()) + updated 
= Column( + DateTime(), + onupdate=func.now(), + server_default=FetchedValue(), + server_onupdate=FetchedValue(), + ) __mapper_args__ = {"eager_defaults": True} @@ -573,8 +583,12 @@ corresponding to all the rows which were matched by the criteria:: from sqlalchemy import update - stmt = update(User).where(User.name == "squidward").values(name="spongebob").\ - returning(User.id) + stmt = ( + update(User) + .where(User.name == "squidward") + .values(name="spongebob") + .returning(User.id) + ) for row in session.execute(stmt): print(f"id: {row.id}") @@ -588,8 +602,12 @@ achieve this, we may combine the :class:`_dml.Update` construct which returns statement in an ORM context using the :meth:`_sql.Select.from_statement` method:: - stmt = update(User).where(User.name == "squidward").values(name="spongebob").\ - returning(User) + stmt = ( + update(User) + .where(User.name == "squidward") + .values(name="spongebob") + .returning(User) + ) orm_stmt = select(User).from_statement(stmt).execution_options(populate_existing=True) @@ -638,11 +656,7 @@ database, while simultaneously producing those objects as ORM instances:: index_elements=[User.name], set_=dict(fullname=stmt.excluded.fullname) ).returning(User) - orm_stmt = ( - select(User) - .from_statement(stmt) - .execution_options(populate_existing=True) - ) + orm_stmt = select(User).from_statement(stmt).execution_options(populate_existing=True) for user in session.execute( orm_stmt, ).scalars(): @@ -718,13 +732,13 @@ The dictionary is consulted whenever the :class:`.Session` needs to emit SQL on behalf of a particular kind of mapped class in order to locate the appropriate source of database connectivity:: - engine1 = create_engine('postgresql://db1') - engine2 = create_engine('postgresql://db2') + engine1 = create_engine("postgresql://db1") + engine2 = create_engine("postgresql://db2") Session = sessionmaker() # bind User operations to engine 1, Account operations to engine 2 - Session.configure(binds={User:engine1, 
Account:engine2}) + Session.configure(binds={User: engine1, Account: engine2}) session = Session() @@ -819,26 +833,25 @@ a custom :class:`.Session` which delivers the following rules: :: engines = { - 'leader':create_engine("sqlite:///leader.db"), - 'other':create_engine("sqlite:///other.db"), - 'follower1':create_engine("sqlite:///follower1.db"), - 'follower2':create_engine("sqlite:///follower2.db"), + "leader": create_engine("sqlite:///leader.db"), + "other": create_engine("sqlite:///other.db"), + "follower1": create_engine("sqlite:///follower1.db"), + "follower2": create_engine("sqlite:///follower2.db"), } from sqlalchemy.sql import Update, Delete from sqlalchemy.orm import Session, sessionmaker import random + class RoutingSession(Session): def get_bind(self, mapper=None, clause=None): if mapper and issubclass(mapper.class_, MyOtherClass): - return engines['other'] + return engines["other"] elif self._flushing or isinstance(clause, (Update, Delete)): - return engines['leader'] + return engines["leader"] else: - return engines[ - random.choice(['follower1','follower2']) - ] + return engines[random.choice(["follower1", "follower2"])] The above :class:`.Session` class is plugged in using the ``class_`` argument to :class:`.sessionmaker`:: @@ -959,19 +972,13 @@ The methods each work in the context of the :class:`.Session` object's transaction, like any other:: s = Session() - objects = [ - User(name="u1"), - User(name="u2"), - User(name="u3") - ] + objects = [User(name="u1"), User(name="u2"), User(name="u3")] s.bulk_save_objects(objects) For :meth:`.Session.bulk_insert_mappings`, and :meth:`.Session.bulk_update_mappings`, dictionaries are passed:: - s.bulk_insert_mappings(User, - [dict(name="u1"), dict(name="u2"), dict(name="u3")] - ) + s.bulk_insert_mappings(User, [dict(name="u1"), dict(name="u2"), dict(name="u3")]) .. 
seealso:: diff --git a/doc/build/orm/queryguide.rst b/doc/build/orm/queryguide.rst index 9fcd2c1bc04..70254234e45 100644 --- a/doc/build/orm/queryguide.rst +++ b/doc/build/orm/queryguide.rst @@ -23,37 +23,37 @@ upon the content at :ref:`tutorial_selecting_data`. >>> user_table = Table( ... "user_account", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String(30)), - ... Column('fullname', String) + ... Column("id", Integer, primary_key=True), + ... Column("name", String(30)), + ... Column("fullname", String), ... ) >>> from sqlalchemy import ForeignKey >>> address_table = Table( ... "address", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('user_id', None, ForeignKey('user_account.id')), - ... Column('email_address', String, nullable=False) + ... Column("id", Integer, primary_key=True), + ... Column("user_id", None, ForeignKey("user_account.id")), + ... Column("email_address", String, nullable=False), ... ) >>> orders_table = Table( ... "user_order", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('user_id', None, ForeignKey('user_account.id')), - ... Column('email_address', String, nullable=False) + ... Column("id", Integer, primary_key=True), + ... Column("user_id", None, ForeignKey("user_account.id")), + ... Column("email_address", String, nullable=False), ... ) >>> order_items_table = Table( ... "order_items", ... metadata_obj, ... Column("order_id", ForeignKey("user_order.id"), primary_key=True), - ... Column("item_id", ForeignKey("item.id"), primary_key=True) + ... Column("item_id", ForeignKey("item.id"), primary_key=True), ... ) >>> items_table = Table( ... "item", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String), - ... Column('description', String) + ... Column("id", Integer, primary_key=True), + ... Column("name", String), + ... Column("description", String), ... 
) >>> metadata_obj.create_all(engine) BEGIN (implicit) @@ -68,7 +68,7 @@ upon the content at :ref:`tutorial_selecting_data`. ... orders = relationship("Order") ... ... def __repr__(self): - ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" + ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" >>> class Address(Base): ... __table__ = address_table @@ -88,22 +88,34 @@ upon the content at :ref:`tutorial_selecting_data`. >>> conn = engine.connect() >>> from sqlalchemy.orm import Session >>> session = Session(conn) - >>> session.add_all([ - ... User(name="spongebob", fullname="Spongebob Squarepants", addresses=[ - ... Address(email_address="spongebob@sqlalchemy.org") - ... ]), - ... User(name="sandy", fullname="Sandy Cheeks", addresses=[ - ... Address(email_address="sandy@sqlalchemy.org"), - ... Address(email_address="squirrel@squirrelpower.org") - ... ]), - ... User(name="patrick", fullname="Patrick Star", addresses=[ - ... Address(email_address="pat999@aol.com") - ... ]), - ... User(name="squidward", fullname="Squidward Tentacles", addresses=[ - ... Address(email_address="stentcl@sqlalchemy.org") - ... ]), - ... User(name="ehkrabs", fullname="Eugene H. Krabs"), - ... ]) + >>> session.add_all( + ... [ + ... User( + ... name="spongebob", + ... fullname="Spongebob Squarepants", + ... addresses=[Address(email_address="spongebob@sqlalchemy.org")], + ... ), + ... User( + ... name="sandy", + ... fullname="Sandy Cheeks", + ... addresses=[ + ... Address(email_address="sandy@sqlalchemy.org"), + ... Address(email_address="squirrel@squirrelpower.org"), + ... ], + ... ), + ... User( + ... name="patrick", + ... fullname="Patrick Star", + ... addresses=[Address(email_address="pat999@aol.com")], + ... ), + ... User( + ... name="squidward", + ... fullname="Squidward Tentacles", + ... addresses=[Address(email_address="stentcl@sqlalchemy.org")], + ... ), + ... User(name="ehkrabs", fullname="Eugene H. Krabs"), + ... ] + ... 
) >>> session.commit() BEGIN ... >>> conn.begin() @@ -117,7 +129,7 @@ SELECT statements are produced by the :func:`_sql.select` function which returns a :class:`_sql.Select` object:: >>> from sqlalchemy import select - >>> stmt = select(User).where(User.name == 'spongebob') + >>> stmt = select(User).where(User.name == "spongebob") To invoke a :class:`_sql.Select` with the ORM, it is passed to :meth:`_orm.Session.execute`:: @@ -184,7 +196,7 @@ same time:: >>> stmt = select(User, Address).join(User.addresses).order_by(User.id, Address.id) {sql}>>> for row in session.execute(stmt): - ... print(f"{row.User.name} {row.Address.email_address}") + ... print(f"{row.User.name} {row.Address.email_address}") SELECT user_account.id, user_account.name, user_account.fullname, address.id AS id_1, address.user_id, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -207,9 +219,9 @@ when passed to :func:`_sql.select`. They may be used in the same way as table columns are used:: {sql}>>> result = session.execute( - ... select(User.name, Address.email_address). - ... join(User.addresses). - ... order_by(User.id, Address.id) + ... select(User.name, Address.email_address) + ... .join(User.addresses) + ... .order_by(User.id, Address.id) ... ) SELECT user_account.name, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -238,8 +250,7 @@ allows sets of column expressions to be grouped in result rows:: >>> from sqlalchemy.orm import Bundle >>> stmt = select( - ... Bundle("user", User.name, User.fullname), - ... Bundle("email", Address.email_address) + ... Bundle("user", User.name, User.fullname), Bundle("email", Address.email_address) ... ).join_from(User, Address) {sql}>>> for row in session.execute(stmt): ... print(f"{row.user.name} {row.user.fullname} {row.email.email_address}") @@ -429,8 +440,7 @@ is used:: >>> from sqlalchemy import union_all >>> u = union_all( - ... select(User).where(User.id < 2), - ... 
select(User).where(User.id == 3) + ... select(User).where(User.id < 2), select(User).where(User.id == 3) ... ).order_by(User.id) >>> stmt = select(User).from_statement(u) >>> for user_obj in session.execute(stmt).scalars(): @@ -455,8 +465,7 @@ entity in a :func:`_sql.select` construct, including that we can add filtering and order by criteria based on its exported columns:: >>> subq = union_all( - ... select(User).where(User.id < 2), - ... select(User).where(User.id == 3) + ... select(User).where(User.id < 2), select(User).where(User.id == 3) ... ).subquery() >>> user_alias = aliased(User, subq) >>> stmt = select(user_alias).order_by(user_alias.id) @@ -531,11 +540,7 @@ a JOIN first from ``User`` to ``Order``, and a second from ``Order`` to relationship, it results in two separate JOIN elements, for a total of three JOIN elements in the resulting SQL:: - >>> stmt = ( - ... select(User). - ... join(User.orders). - ... join(Order.items) - ... ) + >>> stmt = select(User).join(User.orders).join(Order.items) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account @@ -559,12 +564,7 @@ as potential points to continue joining FROM. We can continue to add other elements to join FROM the ``User`` entity above, for example adding on the ``User.addresses`` relationship to our chain of joins:: - >>> stmt = ( - ... select(User). - ... join(User.orders). - ... join(Order.items). - ... join(User.addresses) - ... ) + >>> stmt = select(User).join(User.orders).join(Order.items).join(User.addresses) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account @@ -610,7 +610,7 @@ The third calling form allows both the target entity as well as the ON clause to be passed explicitly. 
A example that includes a SQL expression as the ON clause is as follows:: - >>> stmt = select(User).join(Address, User.id==Address.user_id) + >>> stmt = select(User).join(Address, User.id == Address.user_id) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account JOIN address ON user_account.id = address.user_id @@ -633,11 +633,11 @@ below:: >>> a1 = aliased(Address) >>> a2 = aliased(Address) >>> stmt = ( - ... select(User). - ... join(a1, User.addresses). - ... join(a2, User.addresses). - ... where(a1.email_address == 'ed@foo.com'). - ... where(a2.email_address == 'ed@bar.com') + ... select(User) + ... .join(a1, User.addresses) + ... .join(a2, User.addresses) + ... .where(a1.email_address == "ed@foo.com") + ... .where(a2.email_address == "ed@bar.com") ... ) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname @@ -653,11 +653,11 @@ substituted with an aliased entity by using the this method would be:: >>> stmt = ( - ... select(User). - ... join(User.addresses.of_type(a1)). - ... join(User.addresses.of_type(a2)). - ... where(a1.email_address == 'ed@foo.com'). - ... where(a2.email_address == 'ed@bar.com') + ... select(User) + ... .join(User.addresses.of_type(a1)) + ... .join(User.addresses.of_type(a2)) + ... .where(a1.email_address == "ed@foo.com") + ... .where(a2.email_address == "ed@bar.com") ... ) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname @@ -681,10 +681,7 @@ with the default criteria using AND. Below, the ON criteria between by ``AND``, the first one being the natural join along the foreign key, and the second being a custom limiting criteria:: - >>> stmt = ( - ... select(User). - ... join(User.addresses.and_(Address.email_address != 'foo@bar.com')) - ... 
) + >>> stmt = select(User).join(User.addresses.and_(Address.email_address != "foo@bar.com")) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account @@ -711,11 +708,7 @@ is represented as a row limited subquery, we first construct a :class:`_sql.Subq object using :meth:`_sql.Select.subquery`, which may then be used as the target of the :meth:`_sql.Select.join` method:: - >>> subq = ( - ... select(Address). - ... where(Address.email_address == 'pat999@aol.com'). - ... subquery() - ... ) + >>> subq = select(Address).where(Address.email_address == "pat999@aol.com").subquery() >>> stmt = select(User).join(subq, User.id == subq.c.user_id) >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname @@ -754,14 +747,14 @@ will remain unique within the statement, while the entities that are linked to it using :class:`_orm.aliased` refer to distinct sets of columns:: >>> user_address_subq = ( - ... select(User.id, User.name, Address.id, Address.email_address). - ... join_from(User, Address). - ... where(Address.email_address.in_(['pat999@aol.com', 'squirrel@squirrelpower.org'])). - ... subquery() + ... select(User.id, User.name, Address.id, Address.email_address) + ... .join_from(User, Address) + ... .where(Address.email_address.in_(["pat999@aol.com", "squirrel@squirrelpower.org"])) + ... .subquery() ... ) >>> user_alias = aliased(User, user_address_subq, name="user") >>> address_alias = aliased(Address, user_address_subq, name="address") - >>> stmt = select(user_alias, address_alias).where(user_alias.name == 'sandy') + >>> stmt = select(user_alias, address_alias).where(user_alias.name == "sandy") >>> for row in session.execute(stmt): ... 
print(f"{row.user} {row.address}") {opensql}SELECT anon_1.id, anon_1.name, anon_1.id_1, anon_1.email_address @@ -782,7 +775,7 @@ In cases where the left side of the current state of :class:`_sql.Select` is not in line with what we want to join from, the :meth:`_sql.Select.join_from` method may be used:: - >>> stmt = select(Address).join_from(User, User.addresses).where(User.name == 'sandy') + >>> stmt = select(Address).join_from(User, User.addresses).where(User.name == "sandy") >>> print(stmt) SELECT address.id, address.user_id, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -792,7 +785,7 @@ The :meth:`_sql.Select.join_from` method accepts two or three arguments, either in the form ``, ``, or ``, , []``:: - >>> stmt = select(Address).join_from(User, Address).where(User.name == 'sandy') + >>> stmt = select(Address).join_from(User, Address).where(User.name == "sandy") >>> print(stmt) SELECT address.id, address.user_id, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -803,7 +796,7 @@ can be used subsequent, the :meth:`_sql.Select.select_from` method may also be used:: - >>> stmt = select(Address).select_from(User).join(Address).where(User.name == 'sandy') + >>> stmt = select(Address).select_from(User).join(Address).where(User.name == "sandy") >>> print(stmt) SELECT address.id, address.user_id, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -820,7 +813,7 @@ be used:: such a :class:`_sql.Join` object. 
Therefore we can see the contents of :meth:`_sql.Select.select_from` being overridden in a case like this:: - >>> stmt = select(Address).select_from(User).join(Address.user).where(User.name == 'sandy') + >>> stmt = select(Address).select_from(User).join(Address.user).where(User.name == "sandy") >>> print(stmt) SELECT address.id, address.user_id, address.email_address FROM address JOIN user_account ON user_account.id = address.user_id @@ -837,8 +830,10 @@ be used:: >>> >>> j = address_table.join(user_table, user_table.c.id == address_table.c.user_id) >>> stmt = ( - ... select(address_table).select_from(user_table).select_from(j). - ... where(user_table.c.name == 'sandy') + ... select(address_table) + ... .select_from(user_table) + ... .select_from(j) + ... .where(user_table.c.name == "sandy") ... ) >>> print(stmt) SELECT address.id, address.user_id, address.email_address @@ -1163,7 +1158,7 @@ same way as the legacy :attr:`.Query.column_descriptions` attribute. The format returned is a list of dictionaries:: >>> from pprint import pprint - >>> user_alias = aliased(User, name='user2') + >>> user_alias = aliased(User, name="user2") >>> stmt = select(User, User.id, user_alias) >>> pprint(stmt.column_descriptions) [{'aliased': False, diff --git a/doc/build/orm/quickstart.rst b/doc/build/orm/quickstart.rst index f15fa4a6c7c..f1240e7bd83 100644 --- a/doc/build/orm/quickstart.rst +++ b/doc/build/orm/quickstart.rst @@ -278,10 +278,10 @@ construct creates joins using the :meth:`_sql.Select.join` method: .. sourcecode:: pycon+sql >>> stmt = ( - ... select(Address) - ... .join(Address.user) - ... .where(User.name == "sandy") - ... .where(Address.email_address == "sandy@sqlalchemy.org") + ... select(Address) + ... .join(Address.user) + ... .where(User.name == "sandy") + ... .where(Address.email_address == "sandy@sqlalchemy.org") ... 
) >>> sandy_address = session.scalars(stmt).one() {opensql}SELECT address.id, address.email_address, address.user_id @@ -320,9 +320,7 @@ address associated with "sandy", and also add a new email address to [...] ('patrick',) {stop} - >>> patrick.addresses.append( - ... Address(email_address="patrickstar@sqlalchemy.org") - ... ) + >>> patrick.addresses.append(Address(email_address="patrickstar@sqlalchemy.org")) {opensql}SELECT address.id AS address_id, address.email_address AS address_email_address, address.user_id AS address_user_id FROM address WHERE ? = address.user_id diff --git a/doc/build/orm/relationship_persistence.rst b/doc/build/orm/relationship_persistence.rst index f843764741d..77396639abe 100644 --- a/doc/build/orm/relationship_persistence.rst +++ b/doc/build/orm/relationship_persistence.rst @@ -64,27 +64,27 @@ a complete example, including two :class:`_schema.ForeignKey` constructs:: Base = declarative_base() + class Entry(Base): - __tablename__ = 'entry' + __tablename__ = "entry" entry_id = Column(Integer, primary_key=True) - widget_id = Column(Integer, ForeignKey('widget.widget_id')) + widget_id = Column(Integer, ForeignKey("widget.widget_id")) name = Column(String(50)) + class Widget(Base): - __tablename__ = 'widget' + __tablename__ = "widget" widget_id = Column(Integer, primary_key=True) - favorite_entry_id = Column(Integer, - ForeignKey('entry.entry_id', - name="fk_favorite_entry")) + favorite_entry_id = Column( + Integer, ForeignKey("entry.entry_id", name="fk_favorite_entry") + ) name = Column(String(50)) - entries = relationship(Entry, primaryjoin= - widget_id==Entry.widget_id) - favorite_entry = relationship(Entry, - primaryjoin= - favorite_entry_id==Entry.entry_id, - post_update=True) + entries = relationship(Entry, primaryjoin=widget_id == Entry.widget_id) + favorite_entry = relationship( + Entry, primaryjoin=favorite_entry_id == Entry.entry_id, post_update=True + ) When a structure against the above configuration is flushed, the "widget" row 
will be INSERTed minus the "favorite_entry_id" value, then all the "entry" rows will @@ -94,8 +94,8 @@ row at a time for the time being): .. sourcecode:: pycon+sql - >>> w1 = Widget(name='somewidget') - >>> e1 = Entry(name='someentry') + >>> w1 = Widget(name="somewidget") + >>> e1 = Entry(name="someentry") >>> w1.favorite_entry = e1 >>> w1.entries = [e1] >>> session.add_all([w1, e1]) @@ -115,26 +115,32 @@ it's guaranteed that ``favorite_entry_id`` refers to an ``Entry`` that also refers to this ``Widget``. We can use a composite foreign key, as illustrated below:: - from sqlalchemy import Integer, ForeignKey, String, \ - Column, UniqueConstraint, ForeignKeyConstraint + from sqlalchemy import ( + Integer, + ForeignKey, + String, + Column, + UniqueConstraint, + ForeignKeyConstraint, + ) from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import relationship Base = declarative_base() + class Entry(Base): - __tablename__ = 'entry' + __tablename__ = "entry" entry_id = Column(Integer, primary_key=True) - widget_id = Column(Integer, ForeignKey('widget.widget_id')) + widget_id = Column(Integer, ForeignKey("widget.widget_id")) name = Column(String(50)) - __table_args__ = ( - UniqueConstraint("entry_id", "widget_id"), - ) + __table_args__ = (UniqueConstraint("entry_id", "widget_id"),) + class Widget(Base): - __tablename__ = 'widget' + __tablename__ = "widget" - widget_id = Column(Integer, autoincrement='ignore_fk', primary_key=True) + widget_id = Column(Integer, autoincrement="ignore_fk", primary_key=True) favorite_entry_id = Column(Integer) name = Column(String(50)) @@ -143,18 +149,19 @@ as illustrated below:: ForeignKeyConstraint( ["widget_id", "favorite_entry_id"], ["entry.widget_id", "entry.entry_id"], - name="fk_favorite_entry" + name="fk_favorite_entry", ), ) - entries = relationship(Entry, primaryjoin= - widget_id==Entry.widget_id, - foreign_keys=Entry.widget_id) - favorite_entry = relationship(Entry, - primaryjoin= - 
favorite_entry_id==Entry.entry_id, - foreign_keys=favorite_entry_id, - post_update=True) + entries = relationship( + Entry, primaryjoin=widget_id == Entry.widget_id, foreign_keys=Entry.widget_id + ) + favorite_entry = relationship( + Entry, + primaryjoin=favorite_entry_id == Entry.entry_id, + foreign_keys=favorite_entry_id, + post_update=True, + ) The above mapping features a composite :class:`_schema.ForeignKeyConstraint` bridging the ``widget_id`` and ``favorite_entry_id`` columns. To ensure @@ -184,8 +191,8 @@ capabilities of the database. An example mapping which illustrates this is:: class User(Base): - __tablename__ = 'user' - __table_args__ = {'mysql_engine': 'InnoDB'} + __tablename__ = "user" + __table_args__ = {"mysql_engine": "InnoDB"} username = Column(String(50), primary_key=True) fullname = Column(String(100)) @@ -194,13 +201,11 @@ illustrates this is:: class Address(Base): - __tablename__ = 'address' - __table_args__ = {'mysql_engine': 'InnoDB'} + __tablename__ = "address" + __table_args__ = {"mysql_engine": "InnoDB"} email = Column(String(50), primary_key=True) - username = Column(String(50), - ForeignKey('user.username', onupdate="cascade") - ) + username = Column(String(50), ForeignKey("user.username", onupdate="cascade")) Above, we illustrate ``onupdate="cascade"`` on the :class:`_schema.ForeignKey` object, and we also illustrate the ``mysql_engine='InnoDB'`` setting @@ -245,7 +250,7 @@ will be fully loaded into memory if not already locally present. 
Our previous mapping using ``passive_updates=False`` looks like:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" username = Column(String(50), primary_key=True) fullname = Column(String(100)) @@ -254,11 +259,12 @@ Our previous mapping using ``passive_updates=False`` looks like:: # does not implement ON UPDATE CASCADE addresses = relationship("Address", passive_updates=False) + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" email = Column(String(50), primary_key=True) - username = Column(String(50), ForeignKey('user.username')) + username = Column(String(50), ForeignKey("user.username")) Key limitations of ``passive_updates=False`` include: diff --git a/doc/build/orm/self_referential.rst b/doc/build/orm/self_referential.rst index 71b7a06efd6..6db51520e88 100644 --- a/doc/build/orm/self_referential.rst +++ b/doc/build/orm/self_referential.rst @@ -26,9 +26,9 @@ In this example, we'll work with a single mapped class called ``Node``, representing a tree structure:: class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('node.id')) + parent_id = Column(Integer, ForeignKey("node.id")) data = Column(String(50)) children = relationship("Node") @@ -60,9 +60,9 @@ is a :class:`_schema.Column` or collection of :class:`_schema.Column` objects that indicate those which should be considered to be "remote":: class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('node.id')) + parent_id = Column(Integer, ForeignKey("node.id")) data = Column(String(50)) parent = relationship("Node", remote_side=[id]) @@ -75,13 +75,11 @@ As always, both directions can be combined into a bidirectional relationship using the :func:`.backref` function:: class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = Column(Integer, primary_key=True) - parent_id = 
Column(Integer, ForeignKey('node.id')) + parent_id = Column(Integer, ForeignKey("node.id")) data = Column(String(50)) - children = relationship("Node", - backref=backref('parent', remote_side=[id]) - ) + children = relationship("Node", backref=backref("parent", remote_side=[id])) There are several examples included with SQLAlchemy illustrating self-referential strategies; these include :ref:`examples_adjacencylist` and @@ -99,11 +97,11 @@ the same account as that of the parent; while ``folder_id`` refers to a specific folder within that account:: class Folder(Base): - __tablename__ = 'folder' + __tablename__ = "folder" __table_args__ = ( - ForeignKeyConstraint( - ['account_id', 'parent_id'], - ['folder.account_id', 'folder.folder_id']), + ForeignKeyConstraint( + ["account_id", "parent_id"], ["folder.account_id", "folder.folder_id"] + ), ) account_id = Column(Integer, primary_key=True) @@ -111,10 +109,9 @@ to a specific folder within that account:: parent_id = Column(Integer) name = Column(String) - parent_folder = relationship("Folder", - backref="child_folders", - remote_side=[account_id, folder_id] - ) + parent_folder = relationship( + "Folder", backref="child_folders", remote_side=[account_id, folder_id] + ) Above, we pass ``account_id`` into the :paramref:`_orm.relationship.remote_side` list. :func:`_orm.relationship` recognizes that the ``account_id`` column here @@ -130,7 +127,7 @@ Self-Referential Query Strategies Querying of self-referential structures works like any other query:: # get all nodes named 'child2' - session.query(Node).filter(Node.data=='child2') + session.query(Node).filter(Node.data == "child2") However extra care is needed when attempting to join along the foreign key from one level of the tree to the next. 
In SQL, @@ -147,10 +144,9 @@ looks like: from sqlalchemy.orm import aliased nodealias = aliased(Node) - session.query(Node).filter(Node.data=='subchild1').\ - join(Node.parent.of_type(nodealias)).\ - filter(nodealias.data=="child2").\ - all() + session.query(Node).filter(Node.data == "subchild1").join( + Node.parent.of_type(nodealias) + ).filter(nodealias.data == "child2").all() {opensql}SELECT node.id AS node_id, node.parent_id AS node_parent_id, node.data AS node_data @@ -182,13 +178,12 @@ configured via :paramref:`~.relationships.join_depth`: .. sourcecode:: python+sql class Node(Base): - __tablename__ = 'node' + __tablename__ = "node" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('node.id')) + parent_id = Column(Integer, ForeignKey("node.id")) data = Column(String(50)) - children = relationship("Node", - lazy="joined", - join_depth=2) + children = relationship("Node", lazy="joined", join_depth=2) + session.query(Node).all() {opensql}SELECT node_1.id AS node_1_id, diff --git a/doc/build/orm/session_basics.rst b/doc/build/orm/session_basics.rst index 16b2cae5f81..fcf384d4a2a 100644 --- a/doc/build/orm/session_basics.rst +++ b/doc/build/orm/session_basics.rst @@ -62,7 +62,7 @@ may look like:: # an Engine, which the Session will use for connection # resources - engine = create_engine('postgresql://scott:tiger@localhost/') + engine = create_engine("postgresql://scott:tiger@localhost/") # create session and add objects with Session(engine) as session: @@ -129,8 +129,8 @@ operations:: # create session and add objects with Session(engine) as session: with session.begin(): - session.add(some_object) - session.add(some_other_object) + session.add(some_object) + session.add(some_other_object) # inner context calls session.commit(), if there were no exceptions # outer context calls session.close() @@ -157,7 +157,7 @@ scope, the :class:`_orm.sessionmaker` can provide a factory for # an Engine, which the Session will use for connection # 
resources, typically in module scope - engine = create_engine('postgresql://scott:tiger@localhost/') + engine = create_engine("postgresql://scott:tiger@localhost/") # a sessionmaker(), also in the same scope as the engine Session = sessionmaker(engine) @@ -182,7 +182,7 @@ and also maintains a begin/commit/rollback block:: # an Engine, which the Session will use for connection # resources - engine = create_engine('postgresql://scott:tiger@localhost/') + engine = create_engine("postgresql://scott:tiger@localhost/") # a sessionmaker(), also in the same scope as the engine Session = sessionmaker(engine) @@ -223,10 +223,10 @@ will issue mapper queries within the context of this Session. By other ORM constructs such as an :func:`_orm.aliased` construct:: # query from a class - results = session.query(User).filter_by(name='ed').all() + results = session.query(User).filter_by(name="ed").all() # query with multiple classes, returns tuples - results = session.query(User, Address).join('addresses').filter_by(name='ed').all() + results = session.query(User, Address).join("addresses").filter_by(name="ed").all() # query using orm-columns, also returns tuples results = session.query(User.name, User.fullname).all() @@ -283,7 +283,7 @@ statements that use ORM entities:: result = session.execute(statement).scalars().all() # query with multiple classes - statement = select(User, Address).join('addresses').filter_by(name='ed') + statement = select(User, Address).join("addresses").filter_by(name="ed") # list of tuples result = session.execute(statement).all() @@ -328,12 +328,12 @@ already present and do not need to be added. Instances which are :term:`detached (i.e. 
have been removed from a session) may be re-associated with a session using this method:: - user1 = User(name='user1') - user2 = User(name='user2') + user1 = User(name="user1") + user2 = User(name="user2") session.add(user1) session.add(user2) - session.commit() # write changes to the database + session.commit() # write changes to the database To add a list of items to the session at once, use :meth:`~.Session.add_all`:: @@ -614,8 +614,9 @@ time refresh locally present objects which match those rows. To emit an ORM-enabled UPDATE in :term:`1.x style`, the :meth:`_query.Query.update` method may be used:: - session.query(User).filter(User.name == "squidward").\ - update({"name": "spongebob"}, synchronize_session="fetch") + session.query(User).filter(User.name == "squidward").update( + {"name": "spongebob"}, synchronize_session="fetch" + ) Above, an UPDATE will be emitted against all rows that match the name "squidward" and be updated to the name "spongebob". The @@ -630,8 +631,12 @@ Core :class:`_sql.Update` construct:: from sqlalchemy import update - stmt = update(User).where(User.name == "squidward").values(name="spongebob").\ - execution_options(synchronize_session="fetch") + stmt = ( + update(User) + .where(User.name == "squidward") + .values(name="spongebob") + .execution_options(synchronize_session="fetch") + ) result = session.execute(stmt) @@ -650,14 +655,17 @@ within the :class:`_orm.Session` will be marked as deleted and expunged. 
ORM-enabled delete, :term:`1.x style`:: - session.query(User).filter(User.name == "squidward").\ - delete(synchronize_session="fetch") + session.query(User).filter(User.name == "squidward").delete(synchronize_session="fetch") ORM-enabled delete, :term:`2.0 style`:: from sqlalchemy import delete - stmt = delete(User).where(User.name == "squidward").execution_options(synchronize_session="fetch") + stmt = ( + delete(User) + .where(User.name == "squidward") + .execution_options(synchronize_session="fetch") + ) session.execute(stmt) @@ -1035,6 +1043,7 @@ E.g. **don't do this**:: ### this is the **wrong way to do it** ### + class ThingOne(object): def go(self): session = Session() @@ -1045,6 +1054,7 @@ E.g. **don't do this**:: session.rollback() raise + class ThingTwo(object): def go(self): session = Session() @@ -1055,6 +1065,7 @@ E.g. **don't do this**:: session.rollback() raise + def run_my_program(): ThingOne().go() ThingTwo().go() @@ -1067,21 +1078,23 @@ transaction automatically:: ### this is a **better** (but not the only) way to do it ### + class ThingOne(object): def go(self, session): session.query(FooBar).update({"x": 5}) + class ThingTwo(object): def go(self, session): session.query(Widget).update({"q": 18}) + def run_my_program(): with Session() as session: with session.begin(): ThingOne().go(session) ThingTwo().go(session) - .. versionchanged:: 1.4 The :class:`_orm.Session` may be used as a context manager without the use of external helper functions. @@ -1119,6 +1132,7 @@ available on :class:`~sqlalchemy.orm.session.Session`:: The newer :ref:`core_inspection_toplevel` system can also be used:: from sqlalchemy import inspect + session = inspect(someobject).session .. 
_session_faq_threadsafe: diff --git a/doc/build/orm/session_events.rst b/doc/build/orm/session_events.rst index 544a6c5773d..c24bb9fa962 100644 --- a/doc/build/orm/session_events.rst +++ b/doc/build/orm/session_events.rst @@ -47,6 +47,7 @@ options:: Session = sessionmaker(engine, future=True) + @event.listens_for(Session, "do_orm_execute") def _do_orm_execute(orm_execute_state): if orm_execute_state.is_select: @@ -58,7 +59,7 @@ options:: # ORDER BY if so col_descriptions = orm_execute_state.statement.column_descriptions - if col_descriptions[0]['entity'] is MyEntity: + if col_descriptions[0]["entity"] is MyEntity: orm_execute_state.statement = statement.order_by(MyEntity.name) The above example illustrates some simple modifications to SELECT statements. @@ -85,13 +86,14 @@ may be used on its own, or is ideally suited to be used within the Session = sessionmaker(engine, future=True) + @event.listens_for(Session, "do_orm_execute") def _do_orm_execute(orm_execute_state): if ( - orm_execute_state.is_select and - not orm_execute_state.is_column_load and - not orm_execute_state.is_relationship_load + orm_execute_state.is_select + and not orm_execute_state.is_column_load + and not orm_execute_state.is_relationship_load ): orm_execute_state.statement = orm_execute_state.statement.options( with_loader_criteria(MyEntity.public == True) @@ -114,6 +116,7 @@ Given a series of classes based on a mixin called ``HasTimestamp``:: import datetime + class HasTimestamp(object): timestamp = Column(DateTime, default=datetime.datetime.now) @@ -122,11 +125,11 @@ Given a series of classes based on a mixin called ``HasTimestamp``:: __tablename__ = "some_entity" id = Column(Integer, primary_key=True) + class SomeOtherEntity(HasTimestamp, Base): __tablename__ = "some_entity" id = Column(Integer, primary_key=True) - The above classes ``SomeEntity`` and ``SomeOtherEntity`` will each have a column ``timestamp`` that defaults to the current date and time. 
An event may be used to intercept all objects that extend from ``HasTimestamp`` and filter their @@ -135,9 +138,9 @@ to intercept all objects that extend from ``HasTimestamp`` and filter their @event.listens_for(Session, "do_orm_execute") def _do_orm_execute(orm_execute_state): if ( - orm_execute_state.is_select - and not orm_execute_state.is_column_load - and not orm_execute_state.is_relationship_load + orm_execute_state.is_select + and not orm_execute_state.is_column_load + and not orm_execute_state.is_relationship_load ): one_month_ago = datetime.datetime.today() - datetime.timedelta(months=1) @@ -145,7 +148,7 @@ to intercept all objects that extend from ``HasTimestamp`` and filter their with_loader_criteria( HasTimestamp, lambda cls: cls.timestamp >= one_month_ago, - include_aliases=True + include_aliases=True, ) ) @@ -202,6 +205,7 @@ E.g., using :meth:`_orm.SessionEvents.do_orm_execute` to implement a cache:: cache = {} + @event.listens_for(Session, "do_orm_execute") def _do_orm_execute(orm_execute_state): if "my_cache_key" in orm_execute_state.execution_options: @@ -222,7 +226,9 @@ E.g., using :meth:`_orm.SessionEvents.do_orm_execute` to implement a cache:: With the above hook in place, an example of using the cache would look like:: - stmt = select(User).where(User.name == 'sandy').execution_options(my_cache_key="key_sandy") + stmt = ( + select(User).where(User.name == "sandy").execution_options(my_cache_key="key_sandy") + ) result = session.execute(stmt) @@ -413,7 +419,8 @@ with a specific :class:`.Session` object:: session = Session() - @event.listens_for(session, 'transient_to_pending') + + @event.listens_for(session, "transient_to_pending") def object_is_pending(session, obj): print("new pending: %s" % obj) @@ -425,7 +432,8 @@ Or with the :class:`.Session` class itself, as well as with a specific maker = sessionmaker() - @event.listens_for(maker, 'transient_to_pending') + + @event.listens_for(maker, "transient_to_pending") def object_is_pending(session, 
obj): print("new pending: %s" % obj) @@ -457,11 +465,11 @@ intercept all new objects for a particular declarative base:: Base = declarative_base() + @event.listens_for(Base, "init", propagate=True) def intercept_init(instance, args, kwargs): print("new transient: %s" % instance) - Transient to Pending ^^^^^^^^^^^^^^^^^^^^ @@ -476,7 +484,6 @@ the :meth:`.SessionEvents.transient_to_pending` event:: def intercept_transient_to_pending(session, object_): print("transient to pending: %s" % object_) - Pending to Persistent ^^^^^^^^^^^^^^^^^^^^^ @@ -517,7 +524,6 @@ state via this particular avenue:: def intercept_loaded_as_persistent(session, object_): print("object loaded into persistent state: %s" % object_) - Persistent to Transient ^^^^^^^^^^^^^^^^^^^^^^^ @@ -561,7 +567,6 @@ Track the persistent to deleted transition with def intercept_persistent_to_deleted(session, object_): print("object was DELETEd, is now in deleted state: %s" % object_) - Deleted to Detached ^^^^^^^^^^^^^^^^^^^ @@ -575,7 +580,6 @@ the deleted to detached transition using :meth:`.SessionEvents.deleted_to_detach def intercept_deleted_to_detached(session, object_): print("deleted to detached: %s" % object_) - .. 
note:: While the object is in the deleted state, the :attr:`.InstanceState.deleted` @@ -618,7 +622,6 @@ objects moving back to persistent from detached using the def intercept_detached_to_persistent(session, object_): print("object became persistent again: %s" % object_) - Deleted to Persistent ^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/build/orm/session_state_management.rst b/doc/build/orm/session_state_management.rst index 31e82ab62cb..c1d7230686c 100644 --- a/doc/build/orm/session_state_management.rst +++ b/doc/build/orm/session_state_management.rst @@ -142,25 +142,25 @@ the :term:`persistent` state is as follows:: from sqlalchemy import event + def strong_reference_session(session): @event.listens_for(session, "pending_to_persistent") @event.listens_for(session, "deleted_to_persistent") @event.listens_for(session, "detached_to_persistent") @event.listens_for(session, "loaded_as_persistent") def strong_ref_object(sess, instance): - if 'refs' not in sess.info: - sess.info['refs'] = refs = set() + if "refs" not in sess.info: + sess.info["refs"] = refs = set() else: - refs = sess.info['refs'] + refs = sess.info["refs"] refs.add(instance) - @event.listens_for(session, "persistent_to_detached") @event.listens_for(session, "persistent_to_deleted") @event.listens_for(session, "persistent_to_transient") def deref_object(sess, instance): - sess.info['refs'].discard(instance) + sess.info["refs"].discard(instance) Above, we intercept the :meth:`.SessionEvents.pending_to_persistent`, :meth:`.SessionEvents.detached_to_persistent`, @@ -186,7 +186,6 @@ It may also be called for any :class:`.sessionmaker`:: maker = sessionmaker() strong_reference_session(maker) - .. 
_unitofwork_merging: Merging @@ -290,22 +289,23 @@ some unexpected state regarding the object being passed to :meth:`~.Session.merg Lets use the canonical example of the User and Address objects:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String(50), nullable=False) addresses = relationship("Address", backref="user") + class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" id = Column(Integer, primary_key=True) email_address = Column(String(50), nullable=False) - user_id = Column(Integer, ForeignKey('user.id'), nullable=False) + user_id = Column(Integer, ForeignKey("user.id"), nullable=False) Assume a ``User`` object with one ``Address``, already persistent:: - >>> u1 = User(name='ed', addresses=[Address(email_address='ed@ed.com')]) + >>> u1 = User(name="ed", addresses=[Address(email_address="ed@ed.com")]) >>> session.add(u1) >>> session.commit() @@ -419,7 +419,7 @@ When we talk about expiration of data we are usually talking about an object that is in the :term:`persistent` state. For example, if we load an object as follows:: - user = session.query(User).filter_by(name='user1').first() + user = session.query(User).filter_by(name="user1").first() The above ``User`` object is persistent, and has a series of attributes present; if we were to look inside its ``__dict__``, we'd see that state @@ -481,7 +481,7 @@ Another key behavior of both :meth:`~.Session.expire` and :meth:`~.Session.refre is that all un-flushed changes on an object are discarded. 
That is, if we were to modify an attribute on our ``User``:: - >>> user.name = 'user2' + >>> user.name = "user2" but then we call :meth:`~.Session.expire` without first calling :meth:`~.Session.flush`, our pending value of ``'user2'`` is discarded:: @@ -500,7 +500,7 @@ it can also be passed a list of string attribute names, referring to specific attributes to be marked as expired:: # expire only attributes obj1.attr1, obj1.attr2 - session.expire(obj1, ['attr1', 'attr2']) + session.expire(obj1, ["attr1", "attr2"]) The :meth:`.Session.expire_all` method allows us to essentially call :meth:`.Session.expire` on all objects contained within the :class:`.Session` @@ -519,7 +519,7 @@ but unlike :meth:`~.Session.expire`, expects at least one name to be that of a column-mapped attribute:: # reload obj1.attr1, obj1.attr2 - session.refresh(obj1, ['attr1', 'attr2']) + session.refresh(obj1, ["attr1", "attr2"]) .. tip:: diff --git a/doc/build/orm/session_transaction.rst b/doc/build/orm/session_transaction.rst index c7df69f4292..1d246b79ab9 100644 --- a/doc/build/orm/session_transaction.rst +++ b/doc/build/orm/session_transaction.rst @@ -28,6 +28,7 @@ the scope of the :class:`_orm.SessionTransaction`. 
Below, assume we start with a :class:`_orm.Session`:: from sqlalchemy.orm import Session + session = Session(engine) We can now run operations within a demarcated transaction using a context @@ -139,7 +140,7 @@ method:: session.add(u1) session.add(u2) - nested = session.begin_nested() # establish a savepoint + nested = session.begin_nested() # establish a savepoint session.add(u3) nested.rollback() # rolls back u3, keeps u1 and u2 @@ -163,9 +164,9 @@ rolling back the whole transaction, as in the example below:: for record in records: try: with session.begin_nested(): - session.merge(record) + session.merge(record) except: - print("Skipped record %s" % record) + print("Skipped record %s" % record) session.commit() When the context manager yielded by :meth:`_orm.Session.begin_nested` @@ -264,8 +265,8 @@ Engine:: [ {"data": "some data one"}, {"data": "some data two"}, - {"data": "some data three"} - ] + {"data": "some data three"}, + ], ) conn.commit() @@ -274,11 +275,13 @@ Session:: Session = sessionmaker(engine, future=True) with Session() as session: - session.add_all([ - SomeClass(data="some data one"), - SomeClass(data="some data two"), - SomeClass(data="some data three") - ]) + session.add_all( + [ + SomeClass(data="some data one"), + SomeClass(data="some data two"), + SomeClass(data="some data three"), + ] + ) session.commit() Begin Once @@ -300,8 +303,8 @@ Engine:: [ {"data": "some data one"}, {"data": "some data two"}, - {"data": "some data three"} - ] + {"data": "some data three"}, + ], ) # commits and closes automatically @@ -310,14 +313,15 @@ Session:: Session = sessionmaker(engine, future=True) with Session.begin() as session: - session.add_all([ - SomeClass(data="some data one"), - SomeClass(data="some data two"), - SomeClass(data="some data three") - ]) + session.add_all( + [ + SomeClass(data="some data one"), + SomeClass(data="some data two"), + SomeClass(data="some data three"), + ] + ) # commits and closes automatically - Nested Transaction 
~~~~~~~~~~~~~~~~~~~~ @@ -339,8 +343,8 @@ Engine:: [ {"data": "some data one"}, {"data": "some data two"}, - {"data": "some data three"} - ] + {"data": "some data three"}, + ], ) savepoint.commit() # or rollback @@ -352,17 +356,16 @@ Session:: with Session.begin() as session: savepoint = session.begin_nested() - session.add_all([ - SomeClass(data="some data one"), - SomeClass(data="some data two"), - SomeClass(data="some data three") - ]) + session.add_all( + [ + SomeClass(data="some data one"), + SomeClass(data="some data two"), + SomeClass(data="some data three"), + ] + ) savepoint.commit() # or rollback # commits automatically - - - .. _session_autocommit: .. _session_explicit_begin: @@ -399,8 +402,8 @@ point at which the "begin" operation occurs. To suit this, the try: item1 = session.query(Item).get(1) item2 = session.query(Item).get(2) - item1.foo = 'bar' - item2.bar = 'foo' + item1.foo = "bar" + item2.bar = "foo" session.commit() except: session.rollback() @@ -413,8 +416,8 @@ The above pattern is more idiomatically invoked using a context manager:: with session.begin(): item1 = session.query(Item).get(1) item2 = session.query(Item).get(2) - item1.foo = 'bar' - item2.bar = 'foo' + item1.foo = "bar" + item2.bar = "foo" The :meth:`_orm.Session.begin` method and the session's "autobegin" process use the same sequence of steps to begin the transaction. This includes @@ -453,6 +456,7 @@ a decorator may be used:: import contextlib + @contextlib.contextmanager def transaction(session): if not session.in_transaction(): @@ -461,7 +465,6 @@ a decorator may be used:: else: yield - The above context manager may be used in the same way the "subtransaction" flag works, such as in the following example:: @@ -471,12 +474,14 @@ The above context manager may be used in the same way the with transaction(session): method_b(session) + # method_b also starts a transaction, but when # called from method_a participates in the ongoing # transaction. 
def method_b(session): with transaction(session): - session.add(SomeObject('bat', 'lala')) + session.add(SomeObject("bat", "lala")) + Session = sessionmaker(engine) @@ -491,8 +496,10 @@ or methods to be concerned with the details of transaction demarcation:: def method_a(session): method_b(session) + def method_b(session): - session.add(SomeObject('bat', 'lala')) + session.add(SomeObject("bat", "lala")) + Session = sessionmaker(engine) @@ -518,13 +525,13 @@ also :meth:`_orm.Session.prepare` the session for interacting with transactions not managed by SQLAlchemy. To use two phase transactions set the flag ``twophase=True`` on the session:: - engine1 = create_engine('postgresql://db1') - engine2 = create_engine('postgresql://db2') + engine1 = create_engine("postgresql://db1") + engine2 = create_engine("postgresql://db2") Session = sessionmaker(twophase=True) # bind User operations to engine 1, Account operations to engine 2 - Session.configure(binds={User:engine1, Account:engine2}) + Session.configure(binds={User: engine1, Account: engine2}) session = Session() @@ -534,7 +541,6 @@ transactions set the flag ``twophase=True`` on the session:: # before committing both transactions session.commit() - .. 
_session_transaction_isolation: Setting Transaction Isolation Levels / DBAPI AUTOCOMMIT @@ -583,13 +589,11 @@ in all cases, which is then used as the source of connectivity for a from sqlalchemy.orm import sessionmaker eng = create_engine( - "postgresql://scott:tiger@localhost/test", - isolation_level='REPEATABLE READ' + "postgresql://scott:tiger@localhost/test", isolation_level="REPEATABLE READ" ) Session = sessionmaker(eng) - Another option, useful if there are to be two engines with different isolation levels at once, is to use the :meth:`_engine.Engine.execution_options` method, which will produce a shallow copy of the original :class:`_engine.Engine` which @@ -607,7 +611,6 @@ operations:: transactional_session = sessionmaker(eng) autocommit_session = sessionmaker(autocommit_engine) - Above, both "``eng``" and ``"autocommit_engine"`` share the same dialect and connection pool. However the "AUTOCOMMIT" mode will be set upon connections when they are acquired from the ``autocommit_engine``. The two @@ -660,7 +663,6 @@ methods:: with Session() as session: session.bind_mapper(User, autocommit_engine) - Setting Isolation for Individual Transactions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -683,7 +685,7 @@ level on a per-connection basis can be affected by using the # call connection() with options before any other operations proceed. # this will procure a new connection from the bound engine and begin a real # database transaction. - sess.connection(execution_options={'isolation_level': 'SERIALIZABLE'}) + sess.connection(execution_options={"isolation_level": "SERIALIZABLE"}) # ... work with session in SERIALIZABLE isolation level... @@ -715,15 +717,13 @@ the per-connection-transaction isolation level:: # call connection() with options before any other operations proceed. # this will procure a new connection from the bound engine and begin a # real database transaction. 
- sess.connection(execution_options={'isolation_level': 'SERIALIZABLE'}) + sess.connection(execution_options={"isolation_level": "SERIALIZABLE"}) # ... work with session in SERIALIZABLE isolation level... # outside the block, the transaction has been committed. the connection is # released and reverted to its previous isolation level. - - Tracking Transaction State with Events -------------------------------------- @@ -765,7 +765,8 @@ are reverted:: # global application scope. create Session class, engine Session = sessionmaker() - engine = create_engine('postgresql://...') + engine = create_engine("postgresql://...") + class SomeTest(TestCase): def setUp(self): @@ -775,11 +776,9 @@ are reverted:: # begin a non-ORM transaction self.trans = self.connection.begin() - # bind an individual Session to the connection self.session = Session(bind=self.connection) - ### optional ### # if the database supports SAVEPOINT (SQLite needs special diff --git a/doc/build/orm/tutorial.rst b/doc/build/orm/tutorial.rst index fb52023420c..327957e9f60 100644 --- a/doc/build/orm/tutorial.rst +++ b/doc/build/orm/tutorial.rst @@ -72,7 +72,7 @@ Version Check A quick check to verify that we are on at least **version 1.4** of SQLAlchemy:: >>> import sqlalchemy - >>> sqlalchemy.__version__ # doctest:+SKIP + >>> sqlalchemy.__version__ # doctest:+SKIP 1.4.0 Connecting @@ -82,7 +82,7 @@ For this tutorial we will use an in-memory-only SQLite database. To connect we use :func:`~sqlalchemy.create_engine`:: >>> from sqlalchemy import create_engine - >>> engine = create_engine('sqlite:///:memory:', echo=True) + >>> engine = create_engine("sqlite:///:memory:", echo=True) The ``echo`` flag is a shortcut to setting up SQLAlchemy logging, which is accomplished via Python's standard ``logging`` module. With it enabled, we'll @@ -146,7 +146,7 @@ the table name, and names and datatypes of columns:: >>> from sqlalchemy import Column, Integer, String >>> class User(Base): - ... __tablename__ = 'users' + ... 
__tablename__ = "users" ... ... id = Column(Integer, primary_key=True) ... name = Column(String) @@ -154,8 +154,11 @@ the table name, and names and datatypes of columns:: ... nickname = Column(String) ... ... def __repr__(self): - ... return "" % ( - ... self.name, self.fullname, self.nickname) + ... return "" % ( + ... self.name, + ... self.fullname, + ... self.nickname, + ... ) .. sidebar:: Tip @@ -196,7 +199,7 @@ our table, known as :term:`table metadata`. The object used by SQLAlchemy to r this information for a specific table is called the :class:`_schema.Table` object, and here Declarative has made one for us. We can see this object by inspecting the ``__table__`` attribute:: - >>> User.__table__ # doctest: +NORMALIZE_WHITESPACE + >>> User.__table__ # doctest: +NORMALIZE_WHITESPACE Table('users', MetaData(), Column('id', Integer(), table=, primary_key=True, nullable=False), Column('name', String(), table=), @@ -269,21 +272,25 @@ the actual ``CREATE TABLE`` statement: without being instructed. 
For that, you use the :class:`~sqlalchemy.schema.Sequence` construct:: from sqlalchemy import Sequence - Column(Integer, Sequence('user_id_seq'), primary_key=True) + + Column(Integer, Sequence("user_id_seq"), primary_key=True) A full, foolproof :class:`~sqlalchemy.schema.Table` generated via our declarative mapping is therefore:: class User(Base): - __tablename__ = 'users' - id = Column(Integer, Sequence('user_id_seq'), primary_key=True) + __tablename__ = "users" + id = Column(Integer, Sequence("user_id_seq"), primary_key=True) name = Column(String(50)) fullname = Column(String(50)) nickname = Column(String(50)) def __repr__(self): return "" % ( - self.name, self.fullname, self.nickname) + self.name, + self.fullname, + self.nickname, + ) We include this more verbose table definition separately to highlight the difference between a minimal construct geared primarily @@ -296,7 +303,7 @@ Create an Instance of the Mapped Class With mappings complete, let's now create and inspect a ``User`` object:: - >>> ed_user = User(name='ed', fullname='Ed Jones', nickname='edsnickname') + >>> ed_user = User(name="ed", fullname="Ed Jones", nickname="edsnickname") >>> ed_user.name 'ed' >>> ed_user.nickname @@ -383,7 +390,7 @@ Adding and Updating Objects To persist our ``User`` object, we :meth:`~.Session.add` it to our :class:`~sqlalchemy.orm.session.Session`:: - >>> ed_user = User(name='ed', fullname='Ed Jones', nickname='edsnickname') + >>> ed_user = User(name="ed", fullname="Ed Jones", nickname="edsnickname") >>> session.add(ed_user) At this point, we say that the instance is **pending**; no SQL has yet been issued @@ -401,7 +408,9 @@ added: .. sourcecode:: python+sql - {sql}>>> our_user = session.query(User).filter_by(name='ed').first() # doctest:+NORMALIZE_WHITESPACE + {sql}>>> our_user = ( + ... session.query(User).filter_by(name="ed").first() + ... ) # doctest:+NORMALIZE_WHITESPACE BEGIN (implicit) INSERT INTO users (name, fullname, nickname) VALUES (?, ?, ?) [...] 
('ed', 'Ed Jones', 'edsnickname') @@ -440,16 +449,19 @@ We can add more ``User`` objects at once using .. sourcecode:: python+sql - >>> session.add_all([ - ... User(name='wendy', fullname='Wendy Williams', nickname='windy'), - ... User(name='mary', fullname='Mary Contrary', nickname='mary'), - ... User(name='fred', fullname='Fred Flintstone', nickname='freddy')]) + >>> session.add_all( + ... [ + ... User(name="wendy", fullname="Wendy Williams", nickname="windy"), + ... User(name="mary", fullname="Mary Contrary", nickname="mary"), + ... User(name="fred", fullname="Fred Flintstone", nickname="freddy"), + ... ] + ... ) Also, we've decided Ed's nickname isn't that great, so lets change it: .. sourcecode:: python+sql - >>> ed_user.nickname = 'eddie' + >>> ed_user.nickname = "eddie" The :class:`~sqlalchemy.orm.session.Session` is paying attention. It knows, for example, that ``Ed Jones`` has been modified: @@ -498,7 +510,7 @@ If we look at Ed's ``id`` attribute, which earlier was ``None``, it now has a va .. sourcecode:: python+sql - {sql}>>> ed_user.id # doctest: +NORMALIZE_WHITESPACE + {sql}>>> ed_user.id # doctest: +NORMALIZE_WHITESPACE BEGIN (implicit) SELECT users.id AS users_id, users.name AS users_name, @@ -535,20 +547,20 @@ we can roll back changes made too. Let's make two changes that we'll revert; .. sourcecode:: python+sql - >>> ed_user.name = 'Edwardo' + >>> ed_user.name = "Edwardo" and we'll add another erroneous user, ``fake_user``: .. sourcecode:: python+sql - >>> fake_user = User(name='fakeuser', fullname='Invalid', nickname='12345') + >>> fake_user = User(name="fakeuser", fullname="Invalid", nickname="12345") >>> session.add(fake_user) Querying the session, we can see that they're flushed into the current transaction: .. sourcecode:: python+sql - {sql}>>> session.query(User).filter(User.name.in_(['Edwardo', 'fakeuser'])).all() + {sql}>>> session.query(User).filter(User.name.in_(["Edwardo", "fakeuser"])).all() UPDATE users SET name=? WHERE users.id = ? 
[...] ('Edwardo', 1) INSERT INTO users (name, fullname, nickname) VALUES (?, ?, ?) @@ -588,7 +600,7 @@ issuing a SELECT illustrates the changes made to the database: .. sourcecode:: python+sql - {sql}>>> session.query(User).filter(User.name.in_(['ed', 'fakeuser'])).all() + {sql}>>> session.query(User).filter(User.name.in_(["ed", "fakeuser"])).all() SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -655,7 +667,7 @@ class: .. sourcecode:: python+sql {sql}>>> for row in session.query(User, User.name).all(): - ... print(row.User, row.name) + ... print(row.User, row.name) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -675,8 +687,8 @@ is mapped to one (such as ``User.name``): .. sourcecode:: python+sql - {sql}>>> for row in session.query(User.name.label('name_label')).all(): - ... print(row.name_label) + {sql}>>> for row in session.query(User.name.label("name_label")).all(): + ... print(row.name_label) SELECT users.name AS name_label FROM users [...] (){stop} @@ -692,10 +704,10 @@ entities are present in the call to :meth:`~.Session.query`, can be controlled u .. sourcecode:: python+sql >>> from sqlalchemy.orm import aliased - >>> user_alias = aliased(User, name='user_alias') + >>> user_alias = aliased(User, name="user_alias") {sql}>>> for row in session.query(user_alias, user_alias.name).all(): - ... print(row.user_alias) + ... print(row.user_alias) SELECT user_alias.id AS user_alias_id, user_alias.name AS user_alias_name, user_alias.fullname AS user_alias_fullname, @@ -715,7 +727,7 @@ conjunction with ORDER BY: .. sourcecode:: python+sql {sql}>>> for u in session.query(User).order_by(User.id)[1:3]: - ... print(u) + ... print(u) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -731,9 +743,8 @@ and filtering results, which is accomplished either with .. sourcecode:: python+sql - {sql}>>> for name, in session.query(User.name).\ - ... 
filter_by(fullname='Ed Jones'): - ... print(name) + {sql}>>> for (name,) in session.query(User.name).filter_by(fullname="Ed Jones"): + ... print(name) SELECT users.name AS users_name FROM users WHERE users.fullname = ? [...] ('Ed Jones',) @@ -745,9 +756,8 @@ operators with the class-level attributes on your mapped class: .. sourcecode:: python+sql - {sql}>>> for name, in session.query(User.name).\ - ... filter(User.fullname=='Ed Jones'): - ... print(name) + {sql}>>> for (name,) in session.query(User.name).filter(User.fullname == "Ed Jones"): + ... print(name) SELECT users.name AS users_name FROM users WHERE users.fullname = ? [...] ('Ed Jones',) @@ -762,10 +772,10 @@ users named "ed" with a full name of "Ed Jones", you can call .. sourcecode:: python+sql - {sql}>>> for user in session.query(User).\ - ... filter(User.name=='ed').\ - ... filter(User.fullname=='Ed Jones'): - ... print(user) + {sql}>>> for user in ( + ... session.query(User).filter(User.name == "ed").filter(User.fullname == "Ed Jones") + ... ): + ... 
print(user) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -783,11 +793,11 @@ Here's a rundown of some of the most common operators used in * :meth:`equals <.ColumnOperators.__eq__>`:: - query.filter(User.name == 'ed') + query.filter(User.name == "ed") * :meth:`not equals <.ColumnOperators.__ne__>`:: - query.filter(User.name != 'ed') + query.filter(User.name != "ed") * :meth:`LIKE <.ColumnOperators.like>`:: @@ -808,23 +818,21 @@ Here's a rundown of some of the most common operators used in * :meth:`IN <.ColumnOperators.in_>`:: - query.filter(User.name.in_(['ed', 'wendy', 'jack'])) + query.filter(User.name.in_(["ed", "wendy", "jack"])) # works with query objects too: - query.filter(User.name.in_( - session.query(User.name).filter(User.name.like('%ed%')) - )) + query.filter(User.name.in_(session.query(User.name).filter(User.name.like("%ed%")))) # use tuple_() for composite (multi-column) queries from sqlalchemy import tuple_ + query.filter( - tuple_(User.name, User.nickname).\ - in_([('ed', 'edsnickname'), ('wendy', 'windy')]) + tuple_(User.name, User.nickname).in_([("ed", "edsnickname"), ("wendy", "windy")]) ) * :meth:`NOT IN <.ColumnOperators.not_in>`:: - query.filter(~User.name.in_(['ed', 'wendy', 'jack'])) + query.filter(~User.name.in_(["ed", "wendy", "jack"])) * :meth:`IS NULL <.ColumnOperators.is_>`:: @@ -886,7 +894,7 @@ database results. Here's a brief tour: .. sourcecode:: python+sql - >>> query = session.query(User).filter(User.name.like('%ed')).order_by(User.id) + >>> query = session.query(User).filter(User.name.like("%ed")).order_by(User.id) {sql}>>> query.all() SELECT users.id AS users_id, users.name AS users_name, @@ -964,8 +972,7 @@ database results. Here's a brief tour: .. sourcecode:: python+sql - >>> query = session.query(User.id).filter(User.name == 'ed').\ - ... 
order_by(User.id) + >>> query = session.query(User.id).filter(User.name == "ed").order_by(User.id) {sql}>>> query.scalar() SELECT users.id AS users_id FROM users @@ -988,9 +995,7 @@ by most applicable methods. For example, .. sourcecode:: python+sql >>> from sqlalchemy import text - {sql}>>> for user in session.query(User).\ - ... filter(text("id<224")).\ - ... order_by(text("id")).all(): + {sql}>>> for user in session.query(User).filter(text("id<224")).order_by(text("id")).all(): ... print(user.name) SELECT users.id AS users_id, users.name AS users_name, @@ -1010,8 +1015,9 @@ method: .. sourcecode:: python+sql - {sql}>>> session.query(User).filter(text("id<:value and name=:name")).\ - ... params(value=224, name='fred').order_by(User.id).one() + {sql}>>> session.query(User).filter(text("id<:value and name=:name")).params( + ... value=224, name="fred" + ... ).order_by(User.id).one() SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -1029,8 +1035,9 @@ returned by the SQL statement based on column name: .. sourcecode:: python+sql - {sql}>>> session.query(User).from_statement( - ... text("SELECT * FROM users where name=:name")).params(name='ed').all() + {sql}>>> session.query(User).from_statement(text("SELECT * FROM users where name=:name")).params( + ... name="ed" + ... ).all() SELECT * FROM users where name=? [...] ('ed',) {stop}[] @@ -1041,10 +1048,9 @@ columns are passed in the desired order to :meth:`_expression.TextClause.columns .. sourcecode:: python+sql - >>> stmt = text("SELECT name, id, fullname, nickname " - ... "FROM users where name=:name") + >>> stmt = text("SELECT name, id, fullname, nickname " "FROM users where name=:name") >>> stmt = stmt.columns(User.name, User.id, User.fullname, User.nickname) - {sql}>>> session.query(User).from_statement(stmt).params(name='ed').all() + {sql}>>> session.query(User).from_statement(stmt).params(name="ed").all() SELECT name, id, fullname, nickname FROM users where name=? [...] 
('ed',) {stop}[] @@ -1058,8 +1064,7 @@ any other case: >>> stmt = text("SELECT name, id FROM users where name=:name") >>> stmt = stmt.columns(User.name, User.id) - {sql}>>> session.query(User.id, User.name).\ - ... from_statement(stmt).params(name='ed').all() + {sql}>>> session.query(User.id, User.name).from_statement(stmt).params(name="ed").all() SELECT name, id FROM users where name=? [...] ('ed',) {stop}[(1, u'ed')] @@ -1077,7 +1082,7 @@ counting called :meth:`_query.Query.count`: .. sourcecode:: python+sql - {sql}>>> session.query(User).filter(User.name.like('%ed')).count() + {sql}>>> session.query(User).filter(User.name.like("%ed")).count() SELECT count(*) AS count_1 FROM (SELECT users.id AS users_id, users.name AS users_name, @@ -1125,7 +1130,7 @@ To achieve our simple ``SELECT count(*) FROM table``, we can apply it as: .. sourcecode:: python+sql - {sql}>>> session.query(func.count('*')).select_from(User).scalar() + {sql}>>> session.query(func.count("*")).select_from(User).scalar() SELECT count(?) AS count_1 FROM users [...] ('*',) @@ -1160,18 +1165,17 @@ declarative, we define this table along with its mapped class, ``Address``: >>> from sqlalchemy.orm import relationship >>> class Address(Base): - ... __tablename__ = 'addresses' + ... __tablename__ = "addresses" ... id = Column(Integer, primary_key=True) ... email_address = Column(String, nullable=False) - ... user_id = Column(Integer, ForeignKey('users.id')) + ... user_id = Column(Integer, ForeignKey("users.id")) ... ... user = relationship("User", back_populates="addresses") ... ... def __repr__(self): ... return "" % self.email_address - >>> User.addresses = relationship( - ... 
"Address", order_by=Address.id, back_populates="user") + >>> User.addresses = relationship("Address", order_by=Address.id, back_populates="user") The above class introduces the :class:`_schema.ForeignKey` construct, which is a directive applied to :class:`_schema.Column` that indicates that values in this @@ -1269,7 +1273,7 @@ default, the collection is a Python list. .. sourcecode:: python+sql - >>> jack = User(name='jack', fullname='Jack Bean', nickname='gjffdd') + >>> jack = User(name="jack", fullname="Jack Bean", nickname="gjffdd") >>> jack.addresses [] @@ -1279,8 +1283,9 @@ just assign a full list directly: .. sourcecode:: python+sql >>> jack.addresses = [ - ... Address(email_address='jack@google.com'), - ... Address(email_address='j25@yahoo.com')] + ... Address(email_address="jack@google.com"), + ... Address(email_address="j25@yahoo.com"), + ... ] When using a bidirectional relationship, elements added in one direction automatically become visible in the other direction. This behavior occurs @@ -1316,8 +1321,7 @@ Querying for Jack, we get just Jack back. No SQL is yet issued for Jack's addre .. sourcecode:: python+sql - {sql}>>> jack = session.query(User).\ - ... filter_by(name='jack').one() + {sql}>>> jack = session.query(User).filter_by(name="jack").one() BEGIN (implicit) SELECT users.id AS users_id, users.name AS users_name, @@ -1366,10 +1370,12 @@ Below we load the ``User`` and ``Address`` entities at once using this method: .. sourcecode:: python+sql - {sql}>>> for u, a in session.query(User, Address).\ - ... filter(User.id==Address.user_id).\ - ... filter(Address.email_address=='jack@google.com').\ - ... all(): + {sql}>>> for u, a in ( + ... session.query(User, Address) + ... .filter(User.id == Address.user_id) + ... .filter(Address.email_address == "jack@google.com") + ... .all() + ... ): ... print(u) ... print(a) SELECT users.id AS users_id, @@ -1391,9 +1397,9 @@ using the :meth:`_query.Query.join` method: .. 
sourcecode:: python+sql - {sql}>>> session.query(User).join(Address).\ - ... filter(Address.email_address=='jack@google.com').\ - ... all() + {sql}>>> session.query(User).join(Address).filter( + ... Address.email_address == "jack@google.com" + ... ).all() SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -1408,15 +1414,17 @@ and ``Address`` because there's only one foreign key between them. If there were no foreign keys, or several, :meth:`_query.Query.join` works better when one of the following forms are used:: - query.join(Address, User.id==Address.user_id) # explicit condition - query.join(User.addresses) # specify relationship from left to right - query.join(Address, User.addresses) # same, with explicit target - query.join(User.addresses.and_(Address.name != 'foo')) # use relationship + additional ON criteria + query.join(Address, User.id == Address.user_id) # explicit condition + query.join(User.addresses) # specify relationship from left to right + query.join(Address, User.addresses) # same, with explicit target + query.join( + User.addresses.and_(Address.name != "foo") + ) # use relationship + additional ON criteria As you would expect, the same idea is used for "outer" joins, using the :meth:`_query.Query.outerjoin` function:: - query.outerjoin(User.addresses) # LEFT OUTER JOIN + query.outerjoin(User.addresses) # LEFT OUTER JOIN The reference documentation for :meth:`_query.Query.join` contains detailed information and examples of the calling styles accepted by this method; :meth:`_query.Query.join` @@ -1431,7 +1439,6 @@ is an important method at the center of usage for any SQL-fluent application. query = session.query(User, Address).select_from(Address).join(User) - .. 
_ormtutorial_aliases: Using Aliases @@ -1453,12 +1460,13 @@ distinct email addresses at the same time: >>> from sqlalchemy.orm import aliased >>> adalias1 = aliased(Address) >>> adalias2 = aliased(Address) - {sql}>>> for username, email1, email2 in \ - ... session.query(User.name, adalias1.email_address, adalias2.email_address).\ - ... join(User.addresses.of_type(adalias1)).\ - ... join(User.addresses.of_type(adalias2)).\ - ... filter(adalias1.email_address=='jack@google.com').\ - ... filter(adalias2.email_address=='j25@yahoo.com'): + {sql}>>> for username, email1, email2 in ( + ... session.query(User.name, adalias1.email_address, adalias2.email_address) + ... .join(User.addresses.of_type(adalias1)) + ... .join(User.addresses.of_type(adalias2)) + ... .filter(adalias1.email_address == "jack@google.com") + ... .filter(adalias2.email_address == "j25@yahoo.com") + ... ): ... print(username, email1, email2) SELECT users.name AS users_name, addresses_1.email_address AS addresses_1_email_address, @@ -1501,9 +1509,11 @@ representing the statement generated by a particular construct, which are described in :ref:`sqlexpression_toplevel`:: >>> from sqlalchemy.sql import func - >>> stmt = session.query(Address.user_id, func.count('*').\ - ... label('address_count')).\ - ... group_by(Address.user_id).subquery() + >>> stmt = ( + ... session.query(Address.user_id, func.count("*").label("address_count")) + ... .group_by(Address.user_id) + ... .subquery() + ... ) The ``func`` keyword generates SQL functions, and the ``subquery()`` method on :class:`~sqlalchemy.orm.query.Query` produces a SQL expression construct @@ -1517,8 +1527,11 @@ accessible through an attribute called ``c``: .. sourcecode:: python+sql - {sql}>>> for u, count in session.query(User, stmt.c.address_count).\ - ... outerjoin(stmt, User.id==stmt.c.user_id).order_by(User.id): + {sql}>>> for u, count in ( + ... session.query(User, stmt.c.address_count) + ... .outerjoin(stmt, User.id == stmt.c.user_id) + ... 
.order_by(User.id) + ... ): ... print(u, count) SELECT users.id AS users_id, users.name AS users_name, @@ -1546,12 +1559,11 @@ to associate an "alias" of a mapped class to a subquery: .. sourcecode:: python+sql - {sql}>>> stmt = session.query(Address).\ - ... filter(Address.email_address != 'j25@yahoo.com').\ - ... subquery() + {sql}>>> stmt = ( + ... session.query(Address).filter(Address.email_address != "j25@yahoo.com").subquery() + ... ) >>> addr_alias = aliased(Address, stmt) - >>> for user, address in session.query(User, addr_alias).\ - ... join(addr_alias, User.addresses): + >>> for user, address in session.query(User, addr_alias).join(addr_alias, User.addresses): ... print(user) ... print(address) SELECT users.id AS users_id, @@ -1585,8 +1597,8 @@ There is an explicit EXISTS construct, which looks like this: .. sourcecode:: python+sql >>> from sqlalchemy.sql import exists - >>> stmt = exists().where(Address.user_id==User.id) - {sql}>>> for name, in session.query(User.name).filter(stmt): + >>> stmt = exists().where(Address.user_id == User.id) + {sql}>>> for (name,) in session.query(User.name).filter(stmt): ... print(name) SELECT users.name AS users_name FROM users @@ -1602,8 +1614,7 @@ usage of EXISTS automatically. Above, the statement can be expressed along the .. sourcecode:: python+sql - {sql}>>> for name, in session.query(User.name).\ - ... filter(User.addresses.any()): + {sql}>>> for (name,) in session.query(User.name).filter(User.addresses.any()): ... print(name) SELECT users.name AS users_name FROM users @@ -1617,8 +1628,9 @@ usage of EXISTS automatically. Above, the statement can be expressed along the .. sourcecode:: python+sql - {sql}>>> for name, in session.query(User.name).\ - ... filter(User.addresses.any(Address.email_address.like('%google%'))): + {sql}>>> for (name,) in session.query(User.name).filter( + ... User.addresses.any(Address.email_address.like("%google%")) + ... ): ... 
print(name) SELECT users.name AS users_name FROM users @@ -1634,8 +1646,7 @@ usage of EXISTS automatically. Above, the statement can be expressed along the .. sourcecode:: python+sql - {sql}>>> session.query(Address).\ - ... filter(~Address.user.has(User.name=='jack')).all() + {sql}>>> session.query(Address).filter(~Address.user.has(User.name == "jack")).all() SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, addresses.user_id AS addresses_user_id @@ -1671,18 +1682,18 @@ and behavior: * :meth:`~.RelationshipProperty.Comparator.any` (used for collections):: - query.filter(User.addresses.any(Address.email_address == 'bar')) + query.filter(User.addresses.any(Address.email_address == "bar")) # also takes keyword arguments: - query.filter(User.addresses.any(email_address='bar')) + query.filter(User.addresses.any(email_address="bar")) * :meth:`~.RelationshipProperty.Comparator.has` (used for scalar references):: - query.filter(Address.user.has(name='ed')) + query.filter(Address.user.has(name="ed")) * :meth:`_query.Query.with_parent` (used for any relationship):: - session.query(Address).with_parent(someuser, 'addresses') + session.query(Address).with_parent(someuser, "addresses") Eager Loading ============= @@ -1710,9 +1721,12 @@ at once: .. sourcecode:: python+sql >>> from sqlalchemy.orm import selectinload - {sql}>>> jack = session.query(User).\ - ... options(selectinload(User.addresses)).\ - ... filter_by(name='jack').one() + {sql}>>> jack = ( + ... session.query(User) + ... .options(selectinload(User.addresses)) + ... .filter_by(name="jack") + ... .one() + ... ) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -1749,9 +1763,9 @@ will emit the extra join regardless: >>> from sqlalchemy.orm import joinedload - {sql}>>> jack = session.query(User).\ - ... options(joinedload(User.addresses)).\ - ... filter_by(name='jack').one() + {sql}>>> jack = ( + ... 
session.query(User).options(joinedload(User.addresses)).filter_by(name="jack").one() + ... ) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -1812,11 +1826,13 @@ attribute: .. sourcecode:: python+sql >>> from sqlalchemy.orm import contains_eager - {sql}>>> jacks_addresses = session.query(Address).\ - ... join(Address.user).\ - ... filter(User.name=='jack').\ - ... options(contains_eager(Address.user)).\ - ... all() + {sql}>>> jacks_addresses = ( + ... session.query(Address) + ... .join(Address.user) + ... .filter(User.name == "jack") + ... .options(contains_eager(Address.user)) + ... .all() + ... ) SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -1846,7 +1862,7 @@ in the session, then we'll issue a ``count`` query to see that no rows remain: .. sourcecode:: python+sql >>> session.delete(jack) - {sql}>>> session.query(User).filter_by(name='jack').count() + {sql}>>> session.query(User).filter_by(name="jack").count() UPDATE addresses SET user_id=? WHERE addresses.id = ? [...] ((None, 1), (None, 2)) DELETE FROM users WHERE users.id = ? @@ -1866,8 +1882,8 @@ So far, so good. How about Jack's ``Address`` objects ? .. sourcecode:: python+sql {sql}>>> session.query(Address).filter( - ... Address.email_address.in_(['jack@google.com', 'j25@yahoo.com']) - ... ).count() + ... Address.email_address.in_(["jack@google.com", "j25@yahoo.com"]) + ... ).count() SELECT count(*) AS count_1 FROM (SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address, @@ -1905,28 +1921,32 @@ Next we'll declare the ``User`` class, adding in the ``addresses`` relationship including the cascade configuration (we'll leave the constructor out too):: >>> class User(Base): - ... __tablename__ = 'users' + ... __tablename__ = "users" ... ... id = Column(Integer, primary_key=True) ... name = Column(String) ... fullname = Column(String) ... nickname = Column(String) ... - ... 
addresses = relationship("Address", back_populates='user', - ... cascade="all, delete, delete-orphan") + ... addresses = relationship( + ... "Address", back_populates="user", cascade="all, delete, delete-orphan" + ... ) ... ... def __repr__(self): - ... return "" % ( - ... self.name, self.fullname, self.nickname) + ... return "" % ( + ... self.name, + ... self.fullname, + ... self.nickname, + ... ) Then we recreate ``Address``, noting that in this case we've created the ``Address.user`` relationship via the ``User`` class already:: >>> class Address(Base): - ... __tablename__ = 'addresses' + ... __tablename__ = "addresses" ... id = Column(Integer, primary_key=True) ... email_address = Column(String, nullable=False) - ... user_id = Column(Integer, ForeignKey('users.id')) + ... user_id = Column(Integer, ForeignKey("users.id")) ... user = relationship("User", back_populates="addresses") ... ... def __repr__(self): @@ -1963,7 +1983,7 @@ being deleted: # only one address remains {sql}>>> session.query(Address).filter( - ... Address.email_address.in_(['jack@google.com', 'j25@yahoo.com']) + ... Address.email_address.in_(["jack@google.com", "j25@yahoo.com"]) ... ).count() DELETE FROM addresses WHERE addresses.id = ? [...] (2,) @@ -1983,7 +2003,7 @@ with the user: >>> session.delete(jack) - {sql}>>> session.query(User).filter_by(name='jack').count() + {sql}>>> session.query(User).filter_by(name="jack").count() DELETE FROM addresses WHERE addresses.id = ? [...] (1,) DELETE FROM users WHERE users.id = ? @@ -1999,7 +2019,7 @@ with the user: {stop}0 {sql}>>> session.query(Address).filter( - ... Address.email_address.in_(['jack@google.com', 'j25@yahoo.com']) + ... Address.email_address.in_(["jack@google.com", "j25@yahoo.com"]) ... ).count() SELECT count(*) AS count_1 FROM (SELECT addresses.id AS addresses_id, @@ -2032,9 +2052,11 @@ to serve as the association table. 
This looks like the following:: >>> from sqlalchemy import Table, Text >>> # association table - >>> post_keywords = Table('post_keywords', Base.metadata, - ... Column('post_id', ForeignKey('posts.id'), primary_key=True), - ... Column('keyword_id', ForeignKey('keywords.id'), primary_key=True) + >>> post_keywords = Table( + ... "post_keywords", + ... Base.metadata, + ... Column("post_id", ForeignKey("posts.id"), primary_key=True), + ... Column("keyword_id", ForeignKey("keywords.id"), primary_key=True), ... ) Above, we can see declaring a :class:`_schema.Table` directly is a little different @@ -2048,17 +2070,15 @@ Next we define ``BlogPost`` and ``Keyword``, using complementary table as an association table:: >>> class BlogPost(Base): - ... __tablename__ = 'posts' + ... __tablename__ = "posts" ... ... id = Column(Integer, primary_key=True) - ... user_id = Column(Integer, ForeignKey('users.id')) + ... user_id = Column(Integer, ForeignKey("users.id")) ... headline = Column(String(255), nullable=False) ... body = Column(Text) ... ... # many to many BlogPost<->Keyword - ... keywords = relationship('Keyword', - ... secondary=post_keywords, - ... back_populates='posts') + ... keywords = relationship("Keyword", secondary=post_keywords, back_populates="posts") ... ... def __init__(self, headline, body, author): ... self.author = author @@ -2070,13 +2090,11 @@ table as an association table:: >>> class Keyword(Base): - ... __tablename__ = 'keywords' + ... __tablename__ = "keywords" ... ... id = Column(Integer, primary_key=True) ... keyword = Column(String(50), nullable=False, unique=True) - ... posts = relationship('BlogPost', - ... secondary=post_keywords, - ... back_populates='keywords') + ... posts = relationship("BlogPost", secondary=post_keywords, back_populates="keywords") ... ... def __init__(self, keyword): ... self.keyword = keyword @@ -2144,9 +2162,7 @@ Usage is not too different from what we've been doing. Let's give Wendy some bl .. 
sourcecode:: python+sql - {sql}>>> wendy = session.query(User).\ - ... filter_by(name='wendy').\ - ... one() + {sql}>>> wendy = session.query(User).filter_by(name="wendy").one() SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, @@ -2163,8 +2179,8 @@ have any yet, so we can just create them: .. sourcecode:: python+sql - >>> post.keywords.append(Keyword('wendy')) - >>> post.keywords.append(Keyword('firstpost')) + >>> post.keywords.append(Keyword("wendy")) + >>> post.keywords.append(Keyword("firstpost")) We can now look up all blog posts with the keyword 'firstpost'. We'll use the ``any`` operator to locate "blog posts where any of its keywords has the @@ -2172,9 +2188,7 @@ keyword string 'firstpost'": .. sourcecode:: python+sql - {sql}>>> session.query(BlogPost).\ - ... filter(BlogPost.keywords.any(keyword='firstpost')).\ - ... all() + {sql}>>> session.query(BlogPost).filter(BlogPost.keywords.any(keyword="firstpost")).all() INSERT INTO keywords (keyword) VALUES (?) [...] ('wendy',) INSERT INTO keywords (keyword) VALUES (?) @@ -2201,10 +2215,9 @@ the query to narrow down to that ``User`` object as a parent: .. sourcecode:: python+sql - {sql}>>> session.query(BlogPost).\ - ... filter(BlogPost.author==wendy).\ - ... filter(BlogPost.keywords.any(keyword='firstpost')).\ - ... all() + {sql}>>> session.query(BlogPost).filter(BlogPost.author == wendy).filter( + ... BlogPost.keywords.any(keyword="firstpost") + ... ).all() SELECT posts.id AS posts_id, posts.user_id AS posts_user_id, posts.headline AS posts_headline, @@ -2223,9 +2236,7 @@ relationship, to query straight from there: .. sourcecode:: python+sql - {sql}>>> wendy.posts.\ - ... filter(BlogPost.keywords.any(keyword='firstpost')).\ - ... 
all() + {sql}>>> wendy.posts.filter(BlogPost.keywords.any(keyword="firstpost")).all() SELECT posts.id AS posts_id, posts.user_id AS posts_user_id, posts.headline AS posts_headline, diff --git a/doc/build/orm/versioning.rst b/doc/build/orm/versioning.rst index 30388eb8d24..790c1c1f92e 100644 --- a/doc/build/orm/versioning.rst +++ b/doc/build/orm/versioning.rst @@ -55,15 +55,13 @@ to the mapped table, then establish it as the ``version_id_col`` within the mapper options:: class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) version_id = Column(Integer, nullable=False) name = Column(String(50), nullable=False) - __mapper_args__ = { - "version_id_col": version_id - } + __mapper_args__ = {"version_id_col": version_id} .. note:: It is **strongly recommended** that the ``version_id`` column be made NOT NULL. The versioning feature **does not support** a NULL @@ -105,16 +103,17 @@ support a native GUID type, but we illustrate here using a simple string):: import uuid + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) version_uuid = Column(String(32), nullable=False) name = Column(String(50), nullable=False) __mapper_args__ = { - 'version_id_col':version_uuid, - 'version_id_generator':lambda version: uuid.uuid4().hex + "version_id_col": version_uuid, + "version_id_generator": lambda version: uuid.uuid4().hex, } The persistence engine will call upon ``uuid.uuid4()`` each time a @@ -148,17 +147,15 @@ class as follows:: from sqlalchemy import FetchedValue + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) name = Column(String(50), nullable=False) xmin = Column("xmin", String, system=True, server_default=FetchedValue()) - __mapper_args__ = { - 'version_id_col': xmin, - 'version_id_generator': False - } + __mapper_args__ = {"version_id_col": xmin, "version_id_generator": False} With the above mapping, the ORM 
will rely upon the ``xmin`` column for automatically providing the new value of the version id counter. @@ -222,25 +219,24 @@ at our choosing:: import uuid + class User(Base): - __tablename__ = 'user' + __tablename__ = "user" id = Column(Integer, primary_key=True) version_uuid = Column(String(32), nullable=False) name = Column(String(50), nullable=False) - __mapper_args__ = { - 'version_id_col':version_uuid, - 'version_id_generator': False - } + __mapper_args__ = {"version_id_col": version_uuid, "version_id_generator": False} + - u1 = User(name='u1', version_uuid=uuid.uuid4()) + u1 = User(name="u1", version_uuid=uuid.uuid4()) session.add(u1) session.commit() - u1.name = 'u2' + u1.name = "u2" u1.version_uuid = uuid.uuid4() session.commit() @@ -252,7 +248,7 @@ for schemes where only certain classes of UPDATE are sensitive to concurrency issues:: # will leave version_uuid unchanged - u1.name = 'u3' + u1.name = "u3" session.commit() .. versionadded:: 0.9.0 diff --git a/doc/build/tutorial/data_insert.rst b/doc/build/tutorial/data_insert.rst index 63aeb51a089..0d745cb319c 100644 --- a/doc/build/tutorial/data_insert.rst +++ b/doc/build/tutorial/data_insert.rst @@ -35,7 +35,7 @@ A simple example of :class:`_sql.Insert` illustrating the target table and the VALUES clause at once:: >>> from sqlalchemy import insert - >>> stmt = insert(user_table).values(name='spongebob', fullname="Spongebob Squarepants") + >>> stmt = insert(user_table).values(name="spongebob", fullname="Spongebob Squarepants") The above ``stmt`` variable is an instance of :class:`_sql.Insert`. Most SQL expressions can be stringified in place as a means to see the general @@ -122,8 +122,8 @@ illustrate this: ... insert(user_table), ... [ ... {"name": "sandy", "fullname": "Sandy Cheeks"}, - ... {"name": "patrick", "fullname": "Patrick Star"} - ... ] + ... {"name": "patrick", "fullname": "Patrick Star"}, + ... ], ... ) ... conn.commit() {opensql}BEGIN (implicit) @@ -167,19 +167,19 @@ construct automatically. 
>>> from sqlalchemy import select, bindparam >>> scalar_subq = ( - ... select(user_table.c.id). - ... where(user_table.c.name==bindparam('username')). - ... scalar_subquery() + ... select(user_table.c.id) + ... .where(user_table.c.name == bindparam("username")) + ... .scalar_subquery() ... ) >>> with engine.connect() as conn: ... result = conn.execute( ... insert(address_table).values(user_id=scalar_subq), ... [ - ... {"username": 'spongebob', "email_address": "spongebob@sqlalchemy.org"}, - ... {"username": 'sandy', "email_address": "sandy@sqlalchemy.org"}, - ... {"username": 'sandy', "email_address": "sandy@squirrelpower.org"}, - ... ] + ... {"username": "spongebob", "email_address": "spongebob@sqlalchemy.org"}, + ... {"username": "sandy", "email_address": "sandy@sqlalchemy.org"}, + ... {"username": "sandy", "email_address": "sandy@squirrelpower.org"}, + ... ], ... ) ... conn.commit() {opensql}BEGIN (implicit) @@ -221,7 +221,9 @@ method; in this case, the :class:`_engine.Result` object that's returned when the statement is executed has rows which can be fetched:: - >>> insert_stmt = insert(address_table).returning(address_table.c.id, address_table.c.email_address) + >>> insert_stmt = insert(address_table).returning( + ... address_table.c.id, address_table.c.email_address + ... ) >>> print(insert_stmt) {opensql}INSERT INTO address (id, user_id, email_address) VALUES (:id, :user_id, :email_address) diff --git a/doc/build/tutorial/data_select.rst b/doc/build/tutorial/data_select.rst index eab9dccefd0..9b0b887da15 100644 --- a/doc/build/tutorial/data_select.rst +++ b/doc/build/tutorial/data_select.rst @@ -36,7 +36,7 @@ each method builds more state onto the object. 
Like the other SQL constructs, it can be stringified in place:: >>> from sqlalchemy import select - >>> stmt = select(user_table).where(user_table.c.name == 'spongebob') + >>> stmt = select(user_table).where(user_table.c.name == "spongebob") >>> print(stmt) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account @@ -71,7 +71,7 @@ elements within each row: .. sourcecode:: pycon+sql - >>> stmt = select(User).where(User.name == 'spongebob') + >>> stmt = select(User).where(User.name == "spongebob") >>> with Session(engine) as session: ... for row in session.execute(stmt): ... print(row) @@ -196,9 +196,7 @@ attribute of the ``User`` entity as the first element of the row, and combine it with full ``Address`` entities in the second element:: >>> session.execute( - ... select(User.name, Address). - ... where(User.id==Address.user_id). - ... order_by(Address.id) + ... select(User.name, Address).where(User.id == Address.user_id).order_by(Address.id) ... ).all() {opensql}SELECT user_account.name, address.id, address.email_address, address.user_id FROM user_account, address @@ -226,11 +224,9 @@ when referring to arbitrary SQL expressions in a result row by name: .. sourcecode:: pycon+sql >>> from sqlalchemy import func, cast - >>> stmt = ( - ... select( - ... ("Username: " + user_table.c.name).label("username"), - ... ).order_by(user_table.c.name) - ... ) + >>> stmt = select( + ... ("Username: " + user_table.c.name).label("username"), + ... ).order_by(user_table.c.name) >>> with engine.connect() as conn: ... for row in conn.execute(stmt): ... print(f"{row.username}") @@ -269,11 +265,7 @@ a hardcoded string literal ``'some label'`` and embed it within the SELECT statement:: >>> from sqlalchemy import text - >>> stmt = ( - ... select( - ... text("'some phrase'"), user_table.c.name - ... ).order_by(user_table.c.name) - ... 
) + >>> stmt = select(text("'some phrase'"), user_table.c.name).order_by(user_table.c.name) >>> with engine.connect() as conn: ... print(conn.execute(stmt).all()) {opensql}BEGIN (implicit) @@ -295,10 +287,8 @@ towards in subqueries and other expressions:: >>> from sqlalchemy import literal_column - >>> stmt = ( - ... select( - ... literal_column("'some phrase'").label("p"), user_table.c.name - ... ).order_by(user_table.c.name) + >>> stmt = select(literal_column("'some phrase'").label("p"), user_table.c.name).order_by( + ... user_table.c.name ... ) >>> with engine.connect() as conn: ... for row in conn.execute(stmt): @@ -330,7 +320,7 @@ conjunction with Python operators such as ``==``, ``!=``, ``<``, ``>=`` etc. generate new SQL Expression objects, rather than plain boolean ``True``/``False`` values:: - >>> print(user_table.c.name == 'squidward') + >>> print(user_table.c.name == "squidward") user_account.name = :name_1 >>> print(address_table.c.user_id > 10) @@ -340,7 +330,7 @@ SQL Expression objects, rather than plain boolean ``True``/``False`` values:: We can use expressions like these to generate the WHERE clause by passing the resulting objects to the :meth:`_sql.Select.where` method:: - >>> print(select(user_table).where(user_table.c.name == 'squidward')) + >>> print(select(user_table).where(user_table.c.name == "squidward")) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account WHERE user_account.name = :name_1 @@ -350,9 +340,9 @@ To produce multiple expressions joined by AND, the :meth:`_sql.Select.where` method may be invoked any number of times:: >>> print( - ... select(address_table.c.email_address). - ... where(user_table.c.name == 'squidward'). - ... where(address_table.c.user_id == user_table.c.id) + ... select(address_table.c.email_address) + ... .where(user_table.c.name == "squidward") + ... .where(address_table.c.user_id == user_table.c.id) ... 
) {opensql}SELECT address.email_address FROM address, user_account @@ -362,10 +352,8 @@ A single call to :meth:`_sql.Select.where` also accepts multiple expressions with the same effect:: >>> print( - ... select(address_table.c.email_address). - ... where( - ... user_table.c.name == 'squidward', - ... address_table.c.user_id == user_table.c.id + ... select(address_table.c.email_address).where( + ... user_table.c.name == "squidward", address_table.c.user_id == user_table.c.id ... ) ... ) {opensql}SELECT address.email_address @@ -378,11 +366,10 @@ of ORM entities:: >>> from sqlalchemy import and_, or_ >>> print( - ... select(Address.email_address). - ... where( + ... select(Address.email_address).where( ... and_( - ... or_(User.name == 'squidward', User.name == 'sandy'), - ... Address.user_id == User.id + ... or_(User.name == "squidward", User.name == "sandy"), + ... Address.user_id == User.id, ... ) ... ) ... ) @@ -396,9 +383,7 @@ popular method known as :meth:`_sql.Select.filter_by` which accepts keyword arguments that match to column keys or ORM attribute names. It will filter against the leftmost FROM clause or the last entity joined:: - >>> print( - ... select(User).filter_by(name='spongebob', fullname='Spongebob Squarepants') - ... ) + >>> print(select(User).filter_by(name="spongebob", fullname="Spongebob Squarepants")) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account WHERE user_account.name = :name_1 AND user_account.fullname = :fullname_1 @@ -440,8 +425,9 @@ method, which allows us to indicate the left and right side of the JOIN explicitly:: >>> print( - ... select(user_table.c.name, address_table.c.email_address). - ... join_from(user_table, address_table) + ... select(user_table.c.name, address_table.c.email_address).join_from( + ... user_table, address_table + ... ) ... 
) {opensql}SELECT user_account.name, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -450,10 +436,7 @@ explicitly:: The other is the the :meth:`_sql.Select.join` method, which indicates only the right side of the JOIN, the left hand-side is inferred:: - >>> print( - ... select(user_table.c.name, address_table.c.email_address). - ... join(address_table) - ... ) + >>> print(select(user_table.c.name, address_table.c.email_address).join(address_table)) {opensql}SELECT user_account.name, address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -470,10 +453,7 @@ where we establish ``user_table`` as the first element in the FROM clause and :meth:`_sql.Select.join` to establish ``address_table`` as the second:: - >>> print( - ... select(address_table.c.email_address). - ... select_from(user_table).join(address_table) - ... ) + >>> print(select(address_table.c.email_address).select_from(user_table).join(address_table)) {opensql}SELECT address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -484,9 +464,7 @@ FROM clause. For example, to SELECT from the common SQL expression produce the SQL ``count()`` function:: >>> from sqlalchemy import func - >>> print ( - ... select(func.count('*')).select_from(user_table) - ... ) + >>> print(select(func.count("*")).select_from(user_table)) {opensql}SELECT count(:count_2) AS count_1 FROM user_account @@ -515,9 +493,9 @@ accept an additional argument for the ON clause, which is stated using the same SQL Expression mechanics as we saw about in :ref:`tutorial_select_where_clause`:: >>> print( - ... select(address_table.c.email_address). - ... select_from(user_table). - ... join(address_table, user_table.c.id == address_table.c.user_id) + ... select(address_table.c.email_address) + ... .select_from(user_table) + ... .join(address_table, user_table.c.id == address_table.c.user_id) ... 
) {opensql}SELECT address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -539,15 +517,11 @@ accept keyword arguments :paramref:`_sql.Select.join.isouter` and :paramref:`_sql.Select.join.full` which will render LEFT OUTER JOIN and FULL OUTER JOIN, respectively:: - >>> print( - ... select(user_table).join(address_table, isouter=True) - ... ) + >>> print(select(user_table).join(address_table, isouter=True)) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account LEFT OUTER JOIN address ON user_account.id = address.user_id{stop} - >>> print( - ... select(user_table).join(address_table, full=True) - ... ) + >>> print(select(user_table).join(address_table, full=True)) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account FULL OUTER JOIN address ON user_account.id = address.user_id{stop} @@ -644,10 +618,10 @@ than one address: >>> with engine.connect() as conn: ... result = conn.execute( - ... select(User.name, func.count(Address.id).label("count")). - ... join(Address). - ... group_by(User.name). - ... having(func.count(Address.id) > 1) + ... select(User.name, func.count(Address.id).label("count")) + ... .join(Address) + ... .group_by(User.name) + ... .having(func.count(Address.id) > 1) ... ) ... print(result.all()) {opensql}BEGIN (implicit) @@ -677,10 +651,11 @@ error if no match is found. The unary modifiers .. sourcecode:: pycon+sql >>> from sqlalchemy import func, desc - >>> stmt = select( - ... Address.user_id, - ... func.count(Address.id).label('num_addresses')).\ - ... group_by("user_id").order_by("user_id", desc("num_addresses")) + >>> stmt = ( + ... select(Address.user_id, func.count(Address.id).label("num_addresses")) + ... .group_by("user_id") + ... .order_by("user_id", desc("num_addresses")) + ... 
) >>> print(stmt) {opensql}SELECT address.user_id, count(address.id) AS num_addresses FROM address GROUP BY address.user_id ORDER BY address.user_id, num_addresses DESC @@ -707,8 +682,9 @@ below for example returns all unique pairs of user names:: >>> user_alias_1 = user_table.alias() >>> user_alias_2 = user_table.alias() >>> print( - ... select(user_alias_1.c.name, user_alias_2.c.name). - ... join_from(user_alias_1, user_alias_2, user_alias_1.c.id > user_alias_2.c.id) + ... select(user_alias_1.c.name, user_alias_2.c.name).join_from( + ... user_alias_1, user_alias_2, user_alias_1.c.id > user_alias_2.c.id + ... ) ... ) {opensql}SELECT user_account_1.name, user_account_2.name AS name_1 FROM user_account AS user_account_1 @@ -730,11 +706,11 @@ while maintaining ORM functionality. The SELECT below selects from the >>> address_alias_1 = aliased(Address) >>> address_alias_2 = aliased(Address) >>> print( - ... select(User). - ... join_from(User, address_alias_1). - ... where(address_alias_1.email_address == 'patrick@aol.com'). - ... join_from(User, address_alias_2). - ... where(address_alias_2.email_address == 'patrick@gmail.com') + ... select(User) + ... .join_from(User, address_alias_1) + ... .where(address_alias_1.email_address == "patrick@aol.com") + ... .join_from(User, address_alias_2) + ... .where(address_alias_2.email_address == "patrick@gmail.com") ... ) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account @@ -775,10 +751,11 @@ We can construct a :class:`_sql.Subquery` that will select an aggregate count of rows from the ``address`` table (aggregate functions and GROUP BY were introduced previously at :ref:`tutorial_group_by_w_aggregates`): - >>> subq = select( - ... func.count(address_table.c.id).label("count"), - ... address_table.c.user_id - ... ).group_by(address_table.c.user_id).subquery() + >>> subq = ( + ... select(func.count(address_table.c.id).label("count"), address_table.c.user_id) + ... 
.group_by(address_table.c.user_id) + ... .subquery() + ... ) Stringifying the subquery by itself without it being embedded inside of another :class:`_sql.Select` or other statement produces the plain SELECT statement @@ -804,11 +781,9 @@ With a selection of rows contained within the ``subq`` object, we can apply the object to a larger :class:`_sql.Select` that will join the data to the ``user_account`` table:: - >>> stmt = select( - ... user_table.c.name, - ... user_table.c.fullname, - ... subq.c.count - ... ).join_from(user_table, subq) + >>> stmt = select(user_table.c.name, user_table.c.fullname, subq.c.count).join_from( + ... user_table, subq + ... ) >>> print(stmt) {opensql}SELECT user_account.name, user_account.fullname, anon_1.count @@ -834,16 +809,15 @@ the invocation of the :meth:`_sql.Select.subquery` method to use element in the same way, but the SQL rendered is the very different common table expression syntax:: - >>> subq = select( - ... func.count(address_table.c.id).label("count"), - ... address_table.c.user_id - ... ).group_by(address_table.c.user_id).cte() + >>> subq = ( + ... select(func.count(address_table.c.id).label("count"), address_table.c.user_id) + ... .group_by(address_table.c.user_id) + ... .cte() + ... ) - >>> stmt = select( - ... user_table.c.name, - ... user_table.c.fullname, - ... subq.c.count - ... ).join_from(user_table, subq) + >>> stmt = select(user_table.c.name, user_table.c.fullname, subq.c.count).join_from( + ... user_table, subq + ... ) >>> print(stmt) {opensql}WITH anon_1 AS @@ -894,9 +868,13 @@ each ``Address`` object ultimately came from a subquery against the .. sourcecode:: python+sql - >>> subq = select(Address).where(~Address.email_address.like('%@aol.com')).subquery() + >>> subq = select(Address).where(~Address.email_address.like("%@aol.com")).subquery() >>> address_subq = aliased(Address, subq) - >>> stmt = select(User, address_subq).join_from(User, address_subq).order_by(User.id, address_subq.id) + >>> stmt = ( + ... 
select(User, address_subq) + ... .join_from(User, address_subq) + ... .order_by(User.id, address_subq.id) + ... ) >>> with Session(engine) as session: ... for user, address in session.execute(stmt): ... print(f"{user} {address}") @@ -919,9 +897,13 @@ Another example follows, which is exactly the same except it makes use of the .. sourcecode:: python+sql - >>> cte_obj = select(Address).where(~Address.email_address.like('%@aol.com')).cte() + >>> cte_obj = select(Address).where(~Address.email_address.like("%@aol.com")).cte() >>> address_cte = aliased(Address, cte_obj) - >>> stmt = select(User, address_cte).join_from(User, address_cte).order_by(User.id, address_cte.id) + >>> stmt = ( + ... select(User, address_cte) + ... .join_from(User, address_cte) + ... .order_by(User.id, address_cte.id) + ... ) >>> with Session(engine) as session: ... for user, address in session.execute(stmt): ... print(f"{user} {address}") @@ -968,9 +950,11 @@ subquery is indicated explicitly by making use of the :meth:`_sql.Select.scalar_ method as below. It's default string form when stringified by itself renders as an ordinary SELECT statement that is selecting from two tables:: - >>> subq = select(func.count(address_table.c.id)).\ - ... where(user_table.c.id == address_table.c.user_id).\ - ... scalar_subquery() + >>> subq = ( + ... select(func.count(address_table.c.id)) + ... .where(user_table.c.id == address_table.c.user_id) + ... .scalar_subquery() + ... ) >>> print(subq) {opensql}(SELECT count(address.id) AS count_1 FROM address, user_account @@ -1003,13 +987,13 @@ Simple correlated subqueries will usually do the right thing that's desired. However, in the case where the correlation is ambiguous, SQLAlchemy will let us know that more clarity is needed:: - >>> stmt = select( - ... user_table.c.name, - ... address_table.c.email_address, - ... subq.label("address_count") - ... ).\ - ... join_from(user_table, address_table).\ - ... 
order_by(user_table.c.id, address_table.c.id) + >>> stmt = ( + ... select( + ... user_table.c.name, address_table.c.email_address, subq.label("address_count") + ... ) + ... .join_from(user_table, address_table) + ... .order_by(user_table.c.id, address_table.c.id) + ... ) >>> print(stmt) Traceback (most recent call last): ... @@ -1021,9 +1005,12 @@ To specify that the ``user_table`` is the one we seek to correlate we specify this using the :meth:`_sql.ScalarSelect.correlate` or :meth:`_sql.ScalarSelect.correlate_except` methods:: - >>> subq = select(func.count(address_table.c.id)).\ - ... where(user_table.c.id == address_table.c.user_id).\ - ... scalar_subquery().correlate(user_table) + >>> subq = ( + ... select(func.count(address_table.c.id)) + ... .where(user_table.c.id == address_table.c.user_id) + ... .scalar_subquery() + ... .correlate(user_table) + ... ) The statement then can return the data for this column like any other: @@ -1034,10 +1021,10 @@ The statement then can return the data for this column like any other: ... select( ... user_table.c.name, ... address_table.c.email_address, - ... subq.label("address_count") - ... ). - ... join_from(user_table, address_table). - ... order_by(user_table.c.id, address_table.c.id) + ... subq.label("address_count"), + ... ) + ... .join_from(user_table, address_table) + ... .order_by(user_table.c.id, address_table.c.id) ... ) ... print(result.all()) {opensql}BEGIN (implicit) @@ -1078,21 +1065,19 @@ use of LATERAL, selecting the "user account / count of email address" data as was discussed in the previous section:: >>> subq = ( - ... select( - ... func.count(address_table.c.id).label("address_count"), - ... address_table.c.email_address, - ... address_table.c.user_id, - ... ). - ... where(user_table.c.id == address_table.c.user_id). - ... lateral() + ... select( + ... func.count(address_table.c.id).label("address_count"), + ... address_table.c.email_address, + ... address_table.c.user_id, + ... ) + ... 
.where(user_table.c.id == address_table.c.user_id) + ... .lateral() + ... ) + >>> stmt = ( + ... select(user_table.c.name, subq.c.address_count, subq.c.email_address) + ... .join_from(user_table, subq) + ... .order_by(user_table.c.id, subq.c.email_address) ... ) - >>> stmt = select( - ... user_table.c.name, - ... subq.c.address_count, - ... subq.c.email_address - ... ).\ - ... join_from(user_table, subq).\ - ... order_by(user_table.c.id, subq.c.email_address) >>> print(stmt) {opensql}SELECT user_account.name, anon_1.address_count, anon_1.email_address FROM user_account @@ -1143,8 +1128,8 @@ that it has fewer methods. The :class:`_sql.CompoundSelect` produced by :meth:`_engine.Connection.execute`:: >>> from sqlalchemy import union_all - >>> stmt1 = select(user_table).where(user_table.c.name == 'sandy') - >>> stmt2 = select(user_table).where(user_table.c.name == 'spongebob') + >>> stmt1 = select(user_table).where(user_table.c.name == "sandy") + >>> stmt2 = select(user_table).where(user_table.c.name == "spongebob") >>> u = union_all(stmt1, stmt2) >>> with engine.connect() as conn: ... result = conn.execute(u) @@ -1167,9 +1152,9 @@ collection that may be referred towards in an enclosing :func:`_sql.select`:: >>> u_subq = u.subquery() >>> stmt = ( - ... select(u_subq.c.name, address_table.c.email_address). - ... join_from(address_table, u_subq). - ... order_by(u_subq.c.name, address_table.c.email_address) + ... select(u_subq.c.name, address_table.c.email_address) + ... .join_from(address_table, u_subq) + ... .order_by(u_subq.c.name, address_table.c.email_address) ... ) >>> with engine.connect() as conn: ... 
result = conn.execute(stmt) @@ -1204,8 +1189,8 @@ object that represents the SELECT / UNION / etc statement we want to execute; this statement should be composed against the target ORM entities or their underlying mapped :class:`_schema.Table` objects:: - >>> stmt1 = select(User).where(User.name == 'sandy') - >>> stmt2 = select(User).where(User.name == 'spongebob') + >>> stmt1 = select(User).where(User.name == "sandy") + >>> stmt2 = select(User).where(User.name == "spongebob") >>> u = union_all(stmt1, stmt2) For a simple SELECT with UNION that is not already nested inside of a @@ -1279,15 +1264,13 @@ can return ``user_account`` rows that have more than one related row in .. sourcecode:: pycon+sql >>> subq = ( - ... select(func.count(address_table.c.id)). - ... where(user_table.c.id == address_table.c.user_id). - ... group_by(address_table.c.user_id). - ... having(func.count(address_table.c.id) > 1) + ... select(func.count(address_table.c.id)) + ... .where(user_table.c.id == address_table.c.user_id) + ... .group_by(address_table.c.user_id) + ... .having(func.count(address_table.c.id) > 1) ... ).exists() >>> with engine.connect() as conn: - ... result = conn.execute( - ... select(user_table.c.name).where(subq) - ... ) + ... result = conn.execute(select(user_table.c.name).where(subq)) ... print(result.all()) {opensql}BEGIN (implicit) SELECT user_account.name @@ -1309,13 +1292,10 @@ clause: .. sourcecode:: pycon+sql >>> subq = ( - ... select(address_table.c.id). - ... where(user_table.c.id == address_table.c.user_id) + ... select(address_table.c.id).where(user_table.c.id == address_table.c.user_id) ... ).exists() >>> with engine.connect() as conn: - ... result = conn.execute( - ... select(user_table.c.name).where(~subq) - ... ) + ... result = conn.execute(select(user_table.c.name).where(~subq)) ... print(result.all()) {opensql}BEGIN (implicit) SELECT user_account.name @@ -1571,11 +1551,15 @@ number the email addresses of individual users: .. 
sourcecode:: pycon+sql - >>> stmt = select( - ... func.row_number().over(partition_by=user_table.c.name), - ... user_table.c.name, - ... address_table.c.email_address - ... ).select_from(user_table).join(address_table) + >>> stmt = ( + ... select( + ... func.row_number().over(partition_by=user_table.c.name), + ... user_table.c.name, + ... address_table.c.email_address, + ... ) + ... .select_from(user_table) + ... .join(address_table) + ... ) >>> with engine.connect() as conn: # doctest:+SKIP ... result = conn.execute(stmt) ... print(result.all()) @@ -1593,10 +1577,15 @@ We also may make use of the ``ORDER BY`` clause using :paramref:`_functions.Func .. sourcecode:: pycon+sql - >>> stmt = select( - ... func.count().over(order_by=user_table.c.name), - ... user_table.c.name, - ... address_table.c.email_address).select_from(user_table).join(address_table) + >>> stmt = ( + ... select( + ... func.count().over(order_by=user_table.c.name), + ... user_table.c.name, + ... address_table.c.email_address, + ... ) + ... .select_from(user_table) + ... .join(address_table) + ... ) >>> with engine.connect() as conn: # doctest:+SKIP ... result = conn.execute(stmt) ... print(result.all()) @@ -1635,7 +1624,7 @@ method:: >>> print( ... func.unnest( - ... func.percentile_disc([0.25,0.5,0.75,1]).within_group(user_table.c.name) + ... func.percentile_disc([0.25, 0.5, 0.75, 1]).within_group(user_table.c.name) ... ) ... ) unnest(percentile_disc(:percentile_disc_1) WITHIN GROUP (ORDER BY user_account.name)) @@ -1644,10 +1633,16 @@ method:: particular subset of rows compared to the total range of rows returned, available using the :meth:`_functions.FunctionElement.filter` method:: - >>> stmt = select( - ... func.count(address_table.c.email_address).filter(user_table.c.name == 'sandy'), - ... func.count(address_table.c.email_address).filter(user_table.c.name == 'spongebob') - ... ).select_from(user_table).join(address_table) + >>> stmt = ( + ... select( + ... 
func.count(address_table.c.email_address).filter(user_table.c.name == "sandy"), + ... func.count(address_table.c.email_address).filter( + ... user_table.c.name == "spongebob" + ... ), + ... ) + ... .select_from(user_table) + ... .join(address_table) + ... ) >>> with engine.connect() as conn: # doctest:+SKIP ... result = conn.execute(stmt) ... print(result.all()) @@ -1811,11 +1806,7 @@ string into one of MySQL's JSON functions: >>> from sqlalchemy import JSON >>> from sqlalchemy import type_coerce >>> from sqlalchemy.dialects import mysql - >>> s = select( - ... type_coerce( - ... {'some_key': {'foo': 'bar'}}, JSON - ... )['some_key'] - ... ) + >>> s = select(type_coerce({"some_key": {"foo": "bar"}}, JSON)["some_key"]) >>> print(s.compile(dialect=mysql.dialect())) SELECT JSON_EXTRACT(%s, %s) AS anon_1 diff --git a/doc/build/tutorial/data_update.rst b/doc/build/tutorial/data_update.rst index 1091bccf645..78c2e60f63d 100644 --- a/doc/build/tutorial/data_update.rst +++ b/doc/build/tutorial/data_update.rst @@ -56,8 +56,9 @@ A basic UPDATE looks like:: >>> from sqlalchemy import update >>> stmt = ( - ... update(user_table).where(user_table.c.name == 'patrick'). - ... values(fullname='Patrick the Star') + ... update(user_table) + ... .where(user_table.c.name == "patrick") + ... .values(fullname="Patrick the Star") ... ) >>> print(stmt) {opensql}UPDATE user_account SET fullname=:fullname WHERE user_account.name = :name_1 @@ -70,10 +71,7 @@ keyword arguments. UPDATE supports all the major SQL forms of UPDATE, including updates against expressions, where we can make use of :class:`_schema.Column` expressions:: - >>> stmt = ( - ... update(user_table). - ... values(fullname="Username: " + user_table.c.name) - ... 
) + >>> stmt = update(user_table).values(fullname="Username: " + user_table.c.name) >>> print(stmt) {opensql}UPDATE user_account SET fullname=(:name_1 || user_account.name) @@ -86,19 +84,19 @@ that literal values would normally go: >>> from sqlalchemy import bindparam >>> stmt = ( - ... update(user_table). - ... where(user_table.c.name == bindparam('oldname')). - ... values(name=bindparam('newname')) + ... update(user_table) + ... .where(user_table.c.name == bindparam("oldname")) + ... .values(name=bindparam("newname")) ... ) >>> with engine.begin() as conn: - ... conn.execute( - ... stmt, - ... [ - ... {'oldname':'jack', 'newname':'ed'}, - ... {'oldname':'wendy', 'newname':'mary'}, - ... {'oldname':'jim', 'newname':'jake'}, - ... ] - ... ) + ... conn.execute( + ... stmt, + ... [ + ... {"oldname": "jack", "newname": "ed"}, + ... {"oldname": "wendy", "newname": "mary"}, + ... {"oldname": "jim", "newname": "jake"}, + ... ], + ... ) {opensql}BEGIN (implicit) UPDATE user_account SET name=? WHERE user_account.name = ? [...] (('ed', 'jack'), ('mary', 'wendy'), ('jake', 'jim')) @@ -118,11 +116,11 @@ An UPDATE statement can make use of rows in other tables by using a anywhere a column expression might be placed:: >>> scalar_subq = ( - ... select(address_table.c.email_address). - ... where(address_table.c.user_id == user_table.c.id). - ... order_by(address_table.c.id). - ... limit(1). - ... scalar_subquery() + ... select(address_table.c.email_address) + ... .where(address_table.c.user_id == user_table.c.id) + ... .order_by(address_table.c.id) + ... .limit(1) + ... .scalar_subquery() ... ) >>> update_stmt = update(user_table).values(fullname=scalar_subq) >>> print(update_stmt) @@ -143,11 +141,11 @@ syntax will be generated implicitly when additional tables are located in the WHERE clause of the statement:: >>> update_stmt = ( - ... update(user_table). - ... where(user_table.c.id == address_table.c.user_id). - ... where(address_table.c.email_address == 'patrick@aol.com'). 
- ... values(fullname='Pat') - ... ) + ... update(user_table) + ... .where(user_table.c.id == address_table.c.user_id) + ... .where(address_table.c.email_address == "patrick@aol.com") + ... .values(fullname="Pat") + ... ) >>> print(update_stmt) {opensql}UPDATE user_account SET fullname=:fullname FROM address WHERE user_account.id = address.user_id AND address.email_address = :email_address_1 @@ -158,16 +156,13 @@ requires we refer to :class:`_schema.Table` objects in the VALUES clause in order to refer to additional tables:: >>> update_stmt = ( - ... update(user_table). - ... where(user_table.c.id == address_table.c.user_id). - ... where(address_table.c.email_address == 'patrick@aol.com'). - ... values( - ... { - ... user_table.c.fullname: "Pat", - ... address_table.c.email_address: "pat@aol.com" - ... } - ... ) - ... ) + ... update(user_table) + ... .where(user_table.c.id == address_table.c.user_id) + ... .where(address_table.c.email_address == "patrick@aol.com") + ... .values( + ... {user_table.c.fullname: "Pat", address_table.c.email_address: "pat@aol.com"} + ... ) + ... ) >>> from sqlalchemy.dialects import mysql >>> print(update_stmt.compile(dialect=mysql.dialect())) {opensql}UPDATE user_account, address @@ -185,12 +180,8 @@ of an UPDATE actually impacts the evaluation of each expression. For this use case, the :meth:`_sql.Update.ordered_values` method accepts a sequence of tuples so that this order may be controlled [2]_:: - >>> update_stmt = ( - ... update(some_table). - ... ordered_values( - ... (some_table.c.y, 20), - ... (some_table.c.x, some_table.c.y + 10) - ... ) + >>> update_stmt = update(some_table).ordered_values( + ... (some_table.c.y, 20), (some_table.c.x, some_table.c.y + 10) ... ) >>> print(update_stmt) {opensql}UPDATE some_table SET y=:y, x=(some_table.y + :y_1) @@ -220,7 +211,7 @@ allowing for a RETURNING variant on some database backends. 
:: >>> from sqlalchemy import delete - >>> stmt = delete(user_table).where(user_table.c.name == 'patrick') + >>> stmt = delete(user_table).where(user_table.c.name == "patrick") >>> print(stmt) {opensql}DELETE FROM user_account WHERE user_account.name = :name_1 @@ -235,10 +226,10 @@ subqueries in the WHERE clause as well as backend-specific multiple table syntaxes, such as ``DELETE FROM..USING`` on MySQL:: >>> delete_stmt = ( - ... delete(user_table). - ... where(user_table.c.id == address_table.c.user_id). - ... where(address_table.c.email_address == 'patrick@aol.com') - ... ) + ... delete(user_table) + ... .where(user_table.c.id == address_table.c.user_id) + ... .where(address_table.c.email_address == "patrick@aol.com") + ... ) >>> from sqlalchemy.dialects import mysql >>> print(delete_stmt.compile(dialect=mysql.dialect())) {opensql}DELETE FROM user_account USING user_account, address @@ -259,9 +250,9 @@ is available from the :attr:`_engine.CursorResult.rowcount` attribute: >>> with engine.begin() as conn: ... result = conn.execute( - ... update(user_table). - ... values(fullname="Patrick McStar"). - ... where(user_table.c.name == 'patrick') + ... update(user_table) + ... .values(fullname="Patrick McStar") + ... .where(user_table.c.name == "patrick") ... ) ... print(result.rowcount) {opensql}BEGIN (implicit) @@ -316,9 +307,10 @@ be iterated:: >>> update_stmt = ( - ... update(user_table).where(user_table.c.name == 'patrick'). - ... values(fullname='Patrick the Star'). - ... returning(user_table.c.id, user_table.c.name) + ... update(user_table) + ... .where(user_table.c.name == "patrick") + ... .values(fullname="Patrick the Star") + ... .returning(user_table.c.id, user_table.c.name) ... ) >>> print(update_stmt) {opensql}UPDATE user_account SET fullname=:fullname @@ -326,8 +318,9 @@ be iterated:: RETURNING user_account.id, user_account.name{stop} >>> delete_stmt = ( - ... delete(user_table).where(user_table.c.name == 'patrick'). - ... 
returning(user_table.c.id, user_table.c.name) + ... delete(user_table) + ... .where(user_table.c.name == "patrick") + ... .returning(user_table.c.id, user_table.c.name) ... ) >>> print(delete_stmt) {opensql}DELETE FROM user_account diff --git a/doc/build/tutorial/dbapi_transactions.rst b/doc/build/tutorial/dbapi_transactions.rst index 6492f5f0ec2..cf93534e4fe 100644 --- a/doc/build/tutorial/dbapi_transactions.rst +++ b/doc/build/tutorial/dbapi_transactions.rst @@ -107,7 +107,7 @@ where we acquired the :class:`_future.Connection` object: ... conn.execute(text("CREATE TABLE some_table (x int, y int)")) ... conn.execute( ... text("INSERT INTO some_table (x, y) VALUES (:x, :y)"), - ... [{"x": 1, "y": 1}, {"x": 2, "y": 4}] + ... [{"x": 1, "y": 1}, {"x": 2, "y": 4}], ... ) ... conn.commit() {opensql}BEGIN (implicit) @@ -145,7 +145,7 @@ may be referred towards as **begin once**: >>> with engine.begin() as conn: ... conn.execute( ... text("INSERT INTO some_table (x, y) VALUES (:x, :y)"), - ... [{"x": 6, "y": 8}, {"x": 9, "y": 10}] + ... [{"x": 6, "y": 8}, {"x": 9, "y": 10}], ... ) {opensql}BEGIN (implicit) INSERT INTO some_table (x, y) VALUES (?, ?) @@ -286,8 +286,8 @@ Below we illustrate a variety of ways to access rows. result = conn.execute(text("select x, y from some_table")) for dict_row in result.mappings(): - x = dict_row['x'] - y = dict_row['y'] + x = dict_row["x"] + y = dict_row["y"] .. @@ -316,12 +316,9 @@ construct accepts these using a colon format "``:y``". The actual value for .. sourcecode:: pycon+sql >>> with engine.connect() as conn: - ... result = conn.execute( - ... text("SELECT x, y FROM some_table WHERE y > :y"), - ... {"y": 2} - ... ) + ... result = conn.execute(text("SELECT x, y FROM some_table WHERE y > :y"), {"y": 2}) ... for row in result: - ... print(f"x: {row.x} y: {row.y}") + ... print(f"x: {row.x} y: {row.y}") {opensql}BEGIN (implicit) SELECT x, y FROM some_table WHERE y > ? [...] 
(2,) @@ -370,7 +367,7 @@ be invoked against each parameter set individually: >>> with engine.connect() as conn: ... conn.execute( ... text("INSERT INTO some_table (x, y) VALUES (:x, :y)"), - ... [{"x": 11, "y": 12}, {"x": 13, "y": 14}] + ... [{"x": 11, "y": 12}, {"x": 13, "y": 14}], ... ) ... conn.commit() {opensql}BEGIN (implicit) @@ -436,7 +433,7 @@ a context manager: >>> with Session(engine) as session: ... result = session.execute(stmt, {"y": 6}) ... for row in result: - ... print(f"x: {row.x} y: {row.y}") + ... print(f"x: {row.x} y: {row.y}") {opensql}BEGIN (implicit) SELECT x, y FROM some_table WHERE y > ? ORDER BY x, y [...] (6,){stop} @@ -462,7 +459,7 @@ our data: >>> with Session(engine) as session: ... result = session.execute( ... text("UPDATE some_table SET y=:y WHERE x=:x"), - ... [{"x": 9, "y":11}, {"x": 13, "y": 15}] + ... [{"x": 9, "y": 11}, {"x": 13, "y": 15}], ... ) ... session.commit() {opensql}BEGIN (implicit) diff --git a/doc/build/tutorial/metadata.rst b/doc/build/tutorial/metadata.rst index 6444ed692e0..215d9fd8b89 100644 --- a/doc/build/tutorial/metadata.rst +++ b/doc/build/tutorial/metadata.rst @@ -76,9 +76,9 @@ that will be how we will refer to the table in application code:: >>> user_table = Table( ... "user_account", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('name', String(30)), - ... Column('fullname', String) + ... Column("id", Integer, primary_key=True), + ... Column("name", String(30)), + ... Column("fullname", String), ... ) We can observe that the above :class:`_schema.Table` construct looks a lot like @@ -151,9 +151,9 @@ table:: >>> address_table = Table( ... "address", ... metadata_obj, - ... Column('id', Integer, primary_key=True), - ... Column('user_id', ForeignKey('user_account.id'), nullable=False), - ... Column('email_address', String, nullable=False) + ... Column("id", Integer, primary_key=True), + ... Column("user_id", ForeignKey("user_account.id"), nullable=False), + ... 
Column("email_address", String, nullable=False), ... ) The table above also features a third kind of constraint, which in SQL is the @@ -297,6 +297,7 @@ known as the **declarative base**. We get a new declarative base from the :func:`_orm.declarative_base` function:: from sqlalchemy.orm import declarative_base + Base = declarative_base() .. @@ -313,7 +314,7 @@ for the ``user`` and ``address`` table in terms of new classes ``User`` and >>> from sqlalchemy.orm import relationship >>> class User(Base): - ... __tablename__ = 'user_account' + ... __tablename__ = "user_account" ... ... id = Column(Integer, primary_key=True) ... name = Column(String(30)) @@ -322,14 +323,14 @@ for the ``user`` and ``address`` table in terms of new classes ``User`` and ... addresses = relationship("Address", back_populates="user") ... ... def __repr__(self): - ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" + ... return f"User(id={self.id!r}, name={self.name!r}, fullname={self.fullname!r})" >>> class Address(Base): - ... __tablename__ = 'address' + ... __tablename__ = "address" ... ... id = Column(Integer, primary_key=True) ... email_address = Column(String, nullable=False) - ... user_id = Column(Integer, ForeignKey('user_account.id')) + ... user_id = Column(Integer, ForeignKey("user_account.id")) ... ... user = relationship("User", back_populates="addresses") ... 
@@ -428,7 +429,6 @@ using :meth:`_schema.MetaData.create_all`:: # declarative base Base.metadata.create_all(engine) - Combining Core Table Declarations with ORM Declarative ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -446,6 +446,7 @@ than having the declarative process generate it:: mapper_registry = registry() Base = mapper_registry.generate_base() + class User(Base): __table__ = user_table @@ -454,6 +455,7 @@ than having the declarative process generate it:: def __repr__(self): return f"User({self.name!r}, {self.fullname!r})" + class Address(Base): __table__ = address_table diff --git a/doc/build/tutorial/orm_data_manipulation.rst b/doc/build/tutorial/orm_data_manipulation.rst index b0b67f53c4b..e8bdb3d4c43 100644 --- a/doc/build/tutorial/orm_data_manipulation.rst +++ b/doc/build/tutorial/orm_data_manipulation.rst @@ -290,9 +290,7 @@ from this row and we will get our updated value back: .. sourcecode:: pycon+sql - >>> sandy_fullname = session.execute( - ... select(User.fullname).where(User.id == 2) - ... ).scalar_one() + >>> sandy_fullname = session.execute(select(User.fullname).where(User.id == 2)).scalar_one() {opensql}UPDATE user_account SET fullname=? WHERE user_account.id = ? [...] ('Sandy Squirrel', 2) SELECT user_account.fullname @@ -336,9 +334,9 @@ a value in the ``User.name`` column: .. sourcecode:: pycon+sql >>> session.execute( - ... update(User). - ... where(User.name == "sandy"). - ... values(fullname="Sandy Squirrel Extraordinaire") + ... update(User) + ... .where(User.name == "sandy") + ... .values(fullname="Sandy Squirrel Extraordinaire") ... ) {opensql}UPDATE user_account SET fullname=? WHERE user_account.name = ? [...] ('Sandy Squirrel Extraordinaire', 'sandy'){stop} @@ -525,7 +523,7 @@ and of course the database data is present again as well: .. 
sourcecode:: pycon+sql - {sql}>>> session.execute(select(User).where(User.name == 'patrick')).scalar_one() is patrick + {sql}>>> session.execute(select(User).where(User.name == "patrick")).scalar_one() is patrick SELECT user_account.id, user_account.name, user_account.fullname FROM user_account WHERE user_account.name = ? diff --git a/doc/build/tutorial/orm_related_objects.rst b/doc/build/tutorial/orm_related_objects.rst index 2eacc39e369..02ff2c17221 100644 --- a/doc/build/tutorial/orm_related_objects.rst +++ b/doc/build/tutorial/orm_related_objects.rst @@ -25,8 +25,10 @@ and other directives: .. sourcecode:: python from sqlalchemy.orm import relationship + + class User(Base): - __tablename__ = 'user_account' + __tablename__ = "user_account" # ... Column mappings @@ -34,13 +36,12 @@ and other directives: class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" # ... Column mappings user = relationship("User", back_populates="addresses") - Above, the ``User`` class now has an attribute ``User.addresses`` and the ``Address`` class has an attribute ``Address.user``. The :func:`_orm.relationship` construct will be used to inspect the table @@ -69,7 +70,7 @@ We can start by illustrating what :func:`_orm.relationship` does to instances of objects. If we make a new ``User`` object, we can note that there is a Python list when we access the ``.addresses`` element:: - >>> u1 = User(name='pkrabs', fullname='Pearl Krabs') + >>> u1 = User(name="pkrabs", fullname="Pearl Krabs") >>> u1.addresses [] @@ -301,11 +302,7 @@ corresponding to the :func:`_orm.relationship` may be passed as the **single argument** to :meth:`_sql.Select.join`, where it serves to indicate both the right side of the join as well as the ON clause at once:: - >>> print( - ... select(Address.email_address). - ... select_from(User). - ... join(User.addresses) - ... 
) + >>> print(select(Address.email_address).select_from(User).join(User.addresses)) {opensql}SELECT address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -317,10 +314,7 @@ ON clause, it works because of the :class:`_schema.ForeignKeyConstraint` between the two mapped :class:`_schema.Table` objects, not because of the :func:`_orm.relationship` objects on the ``User`` and ``Address`` classes:: - >>> print( - ... select(Address.email_address). - ... join_from(User, Address) - ... ) + >>> print(select(Address.email_address).join_from(User, Address)) {opensql}SELECT address.email_address FROM user_account JOIN address ON user_account.id = address.user_id @@ -338,12 +332,12 @@ demonstrate we will construct the same join illustrated at :ref:`tutorial_orm_en using the :func:`_orm.relationship` attributes to join instead:: >>> print( - ... select(User). - ... join(User.addresses.of_type(address_alias_1)). - ... where(address_alias_1.email_address == 'patrick@aol.com'). - ... join(User.addresses.of_type(address_alias_2)). - ... where(address_alias_2.email_address == 'patrick@gmail.com') - ... ) + ... select(User) + ... .join(User.addresses.of_type(address_alias_1)) + ... .where(address_alias_1.email_address == "patrick@aol.com") + ... .join(User.addresses.of_type(address_alias_2)) + ... .where(address_alias_2.email_address == "patrick@gmail.com") + ... ) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account JOIN address AS address_1 ON user_account.id = address_1.user_id @@ -356,10 +350,7 @@ aliased entity, the attribute is available from the :func:`_orm.aliased` construct directly:: >>> user_alias_1 = aliased(User) - >>> print( - ... select(user_alias_1.name). - ... join(user_alias_1.addresses) - ... 
) + >>> print(select(user_alias_1.name).join(user_alias_1.addresses)) {opensql}SELECT user_account_1.name FROM user_account AS user_account_1 JOIN address ON user_account_1.id = address.user_id @@ -381,9 +372,8 @@ email addresses: .. sourcecode:: pycon+sql - >>> stmt = ( - ... select(User.fullname). - ... join(User.addresses.and_(Address.email_address == 'pearl.krabs@gmail.com')) + >>> stmt = select(User.fullname).join( + ... User.addresses.and_(Address.email_address == "pearl.krabs@gmail.com") ... ) >>> session.execute(stmt).all() {opensql}SELECT user_account.fullname @@ -411,9 +401,8 @@ an optional WHERE criteria to limit the rows matched by the subquery: .. sourcecode:: pycon+sql - >>> stmt = ( - ... select(User.fullname). - ... where(User.addresses.any(Address.email_address == 'pearl.krabs@gmail.com')) + >>> stmt = select(User.fullname).where( + ... User.addresses.any(Address.email_address == "pearl.krabs@gmail.com") ... ) >>> session.execute(stmt).all() {opensql}SELECT user_account.fullname @@ -431,10 +420,7 @@ for ``User`` entities that have no related ``Address`` rows: .. sourcecode:: pycon+sql - >>> stmt = ( - ... select(User.fullname). - ... where(~User.addresses.any()) - ... ) + >>> stmt = select(User.fullname).where(~User.addresses.any()) >>> session.execute(stmt).all() {opensql}SELECT user_account.fullname FROM user_account @@ -451,10 +437,7 @@ which belonged to "pearl": .. sourcecode:: pycon+sql - >>> stmt = ( - ... select(Address.email_address). - ... where(Address.user.has(User.name=="pkrabs")) - ... ) + >>> stmt = select(Address.email_address).where(Address.user.has(User.name == "pkrabs")) >>> session.execute(stmt).all() {opensql}SELECT address.email_address FROM address @@ -568,8 +551,10 @@ the :paramref:`_orm.relationship.lazy` option, e.g.: .. 
sourcecode:: python from sqlalchemy.orm import relationship + + class User(Base): - __tablename__ = 'user_account' + __tablename__ = "user_account" addresses = relationship("Address", back_populates="user", lazy="selectin") @@ -611,11 +596,11 @@ related ``Address`` objects: .. sourcecode:: pycon+sql >>> from sqlalchemy.orm import selectinload - >>> stmt = ( - ... select(User).options(selectinload(User.addresses)).order_by(User.id) - ... ) + >>> stmt = select(User).options(selectinload(User.addresses)).order_by(User.id) >>> for row in session.execute(stmt): - ... print(f"{row.User.name} ({', '.join(a.email_address for a in row.User.addresses)})") + ... print( + ... f"{row.User.name} ({', '.join(a.email_address for a in row.User.addresses)})" + ... ) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account ORDER BY user_account.id [...] () @@ -655,7 +640,9 @@ as below where we know that all ``Address`` objects have an associated >>> from sqlalchemy.orm import joinedload >>> stmt = ( - ... select(Address).options(joinedload(Address.user, innerjoin=True)).order_by(Address.id) + ... select(Address) + ... .options(joinedload(Address.user, innerjoin=True)) + ... .order_by(Address.id) ... ) >>> for row in session.execute(stmt): ... print(f"{row.Address.email_address} {row.Address.user.name}") @@ -731,10 +718,11 @@ example: >>> from sqlalchemy.orm import contains_eager >>> stmt = ( - ... select(Address). - ... join(Address.user). - ... where(User.name == 'pkrabs'). - ... options(contains_eager(Address.user)).order_by(Address.id) + ... select(Address) + ... .join(Address.user) + ... .where(User.name == "pkrabs") + ... .options(contains_eager(Address.user)) + ... .order_by(Address.id) ... ) >>> for row in session.execute(stmt): ... print(f"{row.Address.email_address} {row.Address.user.name}") @@ -752,10 +740,11 @@ rows. 
If we had applied :func:`_orm.joinedload` separately, we would get a SQL query that unnecessarily joins twice:: >>> stmt = ( - ... select(Address). - ... join(Address.user). - ... where(User.name == 'pkrabs'). - ... options(joinedload(Address.user)).order_by(Address.id) + ... select(Address) + ... .join(Address.user) + ... .where(User.name == "pkrabs") + ... .options(joinedload(Address.user)) + ... .order_by(Address.id) ... ) >>> print(stmt) # SELECT has a JOIN and LEFT OUTER JOIN unnecessarily {opensql}SELECT address.id, address.email_address, address.user_id, @@ -791,19 +780,19 @@ the email addresses with the ``sqlalchemy.org`` domain, we can apply >>> from sqlalchemy.orm import selectinload >>> stmt = ( - ... select(User). - ... options( - ... selectinload( - ... User.addresses.and_( - ... ~Address.email_address.endswith("sqlalchemy.org") - ... ) - ... ) - ... ). - ... order_by(User.id). - ... execution_options(populate_existing=True) + ... select(User) + ... .options( + ... selectinload( + ... User.addresses.and_(~Address.email_address.endswith("sqlalchemy.org")) + ... ) + ... ) + ... .order_by(User.id) + ... .execution_options(populate_existing=True) ... ) >>> for row in session.execute(stmt): - ... print(f"{row.User.name} ({', '.join(a.email_address for a in row.User.addresses)})") + ... print( + ... f"{row.User.name} ({', '.join(a.email_address for a in row.User.addresses)})" + ... ) {opensql}SELECT user_account.id, user_account.name, user_account.fullname FROM user_account ORDER BY user_account.id [...] () @@ -857,7 +846,7 @@ relationship will never try to emit SQL: .. sourcecode:: python class User(Base): - __tablename__ = 'user_account' + __tablename__ = "user_account" # ... Column mappings @@ -865,13 +854,12 @@ relationship will never try to emit SQL: class Address(Base): - __tablename__ = 'address' + __tablename__ = "address" # ... 
Column mappings user = relationship("User", back_populates="addresses", lazy="raise_on_sql") - Using such a mapping, the application is blocked from lazy loading, indicating that a particular query would need to specify a loader strategy: diff --git a/tools/format_docs_code.py b/tools/format_docs_code.py new file mode 100644 index 00000000000..88e9288bc37 --- /dev/null +++ b/tools/format_docs_code.py @@ -0,0 +1,395 @@ +from argparse import ArgumentParser +from argparse import RawDescriptionHelpFormatter +from collections.abc import Iterator +from pathlib import Path +import re + +from black import format_str +from black.const import DEFAULT_LINE_LENGTH +from black.files import parse_pyproject_toml +from black.mode import Mode +from black.mode import TargetVersion + + +home = Path(__file__).parent.parent + +_Block = list[ + tuple[ + str, + int, + str | None, + str | None, + str, + ] +] + + +def _format_block( + input_block: _Block, + exit_on_error: bool, + errors: list[tuple[int, str, Exception]], + is_doctest: bool, +) -> list[str]: + if not is_doctest: + # The first line may have additional padding. 
Remove then restore later + add_padding = start_space.match(input_block[0][4]).groups()[0] + skip = len(add_padding) + code = "\n".join( + c[skip:] if c.startswith(add_padding) else c + for *_, c in input_block + ) + else: + add_padding = None + code = "\n".join(c for *_, c in input_block) + + try: + formatted = format_str(code, mode=BLACK_MODE) + except Exception as e: + start_line = input_block[0][1] + errors.append((start_line, code, e)) + if is_doctest: + print( + "Could not format code block starting at " + f"line {start_line}:\n{code}\nError: {e}" + ) + if exit_on_error: + print("Exiting since --exit-on-error was passed") + raise + else: + print("Ignoring error") + elif VERBOSE: + print( + "Could not format code block starting at " + f"line {start_line}:\n---\n{code}\n---Error: {e}" + ) + return [line for line, *_ in input_block] + else: + formatted_code_lines = formatted.splitlines() + padding = input_block[0][2] + sql_prefix = input_block[0][3] or "" + + if is_doctest: + formatted_lines = [ + f"{padding}{sql_prefix}>>> {formatted_code_lines[0]}", + *( + f"{padding}...{' ' if fcl else ''}{fcl}" + for fcl in formatted_code_lines[1:] + ), + ] + else: + formatted_lines = [ + f"{padding}{add_padding}{sql_prefix}{formatted_code_lines[0]}", + *( + f"{padding}{add_padding}{fcl}" if fcl else fcl + for fcl in formatted_code_lines[1:] + ), + ] + if not input_block[-1][0] and formatted_lines[-1]: + # last line was empty and black removed it. 
restore it + formatted_lines.append("") + return formatted_lines + + +format_directive = re.compile(r"^\.\.\s*format\s*:\s*(on|off)\s*$") + +doctest_code_start = re.compile(r"^(\s+)({(?:opensql|sql|stop)})?>>>\s?(.+)") +doctest_code_continue = re.compile(r"^\s+\.\.\.\s?(\s*.*)") +sql_code_start = re.compile(r"^(\s+){(?:open)?sql}") +sql_code_stop = re.compile(r"^(\s+){stop}") + +start_code_section = re.compile( + r"^(((?!\.\.).+::)|(\.\.\s*sourcecode::(.*py.*)?)|(::))$" +) +start_space = re.compile(r"^(\s*)[^ ]?") + + +def format_file( + file: Path, exit_on_error: bool, check: bool, no_plain: bool +) -> tuple[bool, int]: + buffer = [] + if not check: + print(f"Running file {file} ..", end="") + original = file.read_text("utf-8") + doctest_block: _Block | None = None + plain_block: _Block | None = None + + plain_code_section = False + plain_padding = None + plain_padding_len = None + sql_section = False + + errors = [] + + disable_format = False + for line_no, line in enumerate(original.splitlines(), 1): + # start_code_section requires no spaces at the start + + if start_code_section.match(line.strip()): + if plain_block: + buffer.extend( + _format_block( + plain_block, exit_on_error, errors, is_doctest=False + ) + ) + plain_block = None + plain_code_section = True + assert not sql_section + plain_padding = start_space.match(line).groups()[0] + plain_padding_len = len(plain_padding) + buffer.append(line) + continue + elif ( + plain_code_section + and line.strip() + and not line.startswith(" " * (plain_padding_len + 1)) + ): + plain_code_section = sql_section = False + elif match := format_directive.match(line): + disable_format = match.groups()[0] == "off" + + if doctest_block: + assert not plain_block + if match := doctest_code_continue.match(line): + doctest_block.append( + (line, line_no, None, None, match.groups()[0]) + ) + continue + else: + buffer.extend( + _format_block( + doctest_block, exit_on_error, errors, is_doctest=True + ) + ) + doctest_block = None + 
elif plain_block: + if ( + plain_code_section + and not doctest_code_start.match(line) + and not sql_code_start.match(line) + ): + plain_block.append( + (line, line_no, None, None, line[plain_padding_len:]) + ) + continue + else: + buffer.extend( + _format_block( + plain_block, exit_on_error, errors, is_doctest=False + ) + ) + plain_block = None + + if line and (match := doctest_code_start.match(line)): + plain_code_section = sql_section = False + if plain_block: + buffer.extend( + _format_block( + plain_block, exit_on_error, errors, is_doctest=False + ) + ) + plain_block = None + padding, code = match.group(1, 3) + doctest_block = [(line, line_no, padding, match.group(2), code)] + elif ( + line + and plain_code_section + and (match := sql_code_start.match(line)) + ): + if plain_block: + buffer.extend( + _format_block( + plain_block, exit_on_error, errors, is_doctest=False + ) + ) + plain_block = None + + sql_section = True + buffer.append(line) + elif line and sql_section and (match := sql_code_stop.match(line)): + sql_section = False + line = line.replace("{stop}", "") + assert not doctest_block + # start of a plain block + if line.strip(): + plain_block = [ + ( + line, + line_no, + plain_padding, + "{stop}", + line[plain_padding_len:], + ) + ] + + elif ( + line + and not no_plain + and not disable_format + and plain_code_section + and not sql_section + ): + assert not doctest_block + # start of a plain block + plain_block = [ + (line, line_no, plain_padding, None, line[plain_padding_len:]) + ] + else: + buffer.append(line) + + if doctest_block: + buffer.extend( + _format_block( + doctest_block, exit_on_error, errors, is_doctest=True + ) + ) + if plain_block: + buffer.extend( + _format_block(plain_block, exit_on_error, errors, is_doctest=False) + ) + if buffer: + # if there is nothing in the buffer something strange happened so + # don't do anything + buffer.append("") + updated = "\n".join(buffer) + equal = original == updated + if not check: + print( + 
f"..done. {len(errors)} error(s).", + "No changes" if equal else "Changes detected", + ) + if not equal: + # write only if there are changes to write + file.write_text(updated, "utf-8", newline="\n") + else: + if not check: + print(".. Nothing to write") + equal = bool(original) is False + + if check: + if not equal: + print(f"File {file} would be formatted") + return equal, len(errors) + + +def iter_files(directory) -> Iterator[Path]: + yield from (home / directory).glob("./**/*.rst") + + +def main( + file: str | None, + directory: str, + exit_on_error: bool, + check: bool, + no_plain: bool, +): + if file is not None: + result = [format_file(Path(file), exit_on_error, check, no_plain)] + else: + result = [ + format_file(doc, exit_on_error, check, no_plain) + for doc in iter_files(directory) + ] + + if check: + formatting_error_counts = [e for _, e in result if e] + to_reformat = len([b for b, _ in result if not b]) + + if not to_reformat and not formatting_error_counts: + print("All files are correctly formatted") + exit(0) + else: + print( + f"{to_reformat} file(s) would be reformatted;", + ( + f"{sum(formatting_error_counts)} formatting errors " + f"reported in {len(formatting_error_counts)} files" + ) + if formatting_error_counts + else "no formatting errors reported", + ) + + # interim, until we fix all formatting errors + if not to_reformat: + exit(0) + exit(1) + + +if __name__ == "__main__": + parser = ArgumentParser( + description="""Formats code inside docs using black. Supports \ +doctest code blocks and also tries to format plain code block identifies as \ +all indented blocks of at least 4 spaces, unless '--no-plain' is specified. + +Plain code block may lead to false positive. To disable formatting on a \ +file section the comment ``.. format: off`` disables formatting until \ +``.. format: on`` is encountered or the file ends. +Another alterative is to use less than 4 spaces to indent the code block. 
+""", + formatter_class=RawDescriptionHelpFormatter, + ) + parser.add_argument( + "-f", "--file", help="Format only this file instead of all docs" + ) + parser.add_argument( + "-d", + "--directory", + help="Find documents in this directory and its sub dirs", + default="doc/build", + ) + parser.add_argument( + "-c", + "--check", + help="Don't write the files back, just return the " + "status. Return code 0 means nothing would change. " + "Return code 1 means some files would be reformatted.", + action="store_true", + ) + parser.add_argument( + "-e", + "--exit-on-error", + help="Exit in case of black format error instead of ignoring it. " + "This option is only valid for doctest code blocks", + action="store_true", + ) + parser.add_argument( + "-l", + "--project-line-length", + help="Configure the line length to the project value instead " + "of using the black default of 88", + action="store_true", + ) + parser.add_argument( + "-v", + "--verbose", + help="Increase verbosity", + action="store_true", + ) + parser.add_argument( + "-n", + "--no-plain", + help="Disable plain code blocks formatting that's more difficult " + "to parse compared to doctest code blocks", + action="store_true", + ) + args = parser.parse_args() + + config = parse_pyproject_toml(home / "pyproject.toml") + BLACK_MODE = Mode( + target_versions=set( + TargetVersion[val.upper()] + for val in config.get("target_version", []) + if val != "py27" + ), + line_length=config.get("line_length", DEFAULT_LINE_LENGTH) + if args.project_line_length + else DEFAULT_LINE_LENGTH, + ) + VERBOSE = args.verbose + + main( + args.file, + args.directory, + args.exit_on_error, + args.check, + args.no_plain, + ) From f35446512582defc2fcde826b8c059ed09ddabf3 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 3 Oct 2022 11:40:27 -0400 Subject: [PATCH 386/632] clarify precedence docs Change-Id: I748f2736eb6382c8625b3419a82785b48766d8f7 references: #8584 (cherry picked from commit b295a3b58f13d566c37244448218e4287f5e47ee) 
--- lib/sqlalchemy/sql/operators.py | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/lib/sqlalchemy/sql/operators.py b/lib/sqlalchemy/sql/operators.py index 1da50322967..806d41eaf88 100644 --- a/lib/sqlalchemy/sql/operators.py +++ b/lib/sqlalchemy/sql/operators.py @@ -145,13 +145,20 @@ def op( between this element and the expression passed to the generated function. - :param precedence: precedence to apply to the operator, when - parenthesizing expressions. A lower number will cause the expression - to be parenthesized when applied against another operator with - higher precedence. The default value of ``0`` is lower than all - operators except for the comma (``,``) and ``AS`` operators. - A value of 100 will be higher or equal to all operators, and -100 - will be lower than or equal to all operators. + :param precedence: precedence which the database is expected to apply + to the operator in SQL expressions. This integer value acts as a hint + for the SQL compiler to know when explicit parenthesis should be + rendered around a particular operation. A lower number will cause the + expression to be parenthesized when applied against another operator + with higher precedence. The default value of ``0`` is lower than all + operators except for the comma (``,``) and ``AS`` operators. A value + of 100 will be higher or equal to all operators, and -100 will be + lower than or equal to all operators. + + .. 
seealso:: + + :ref:`faq_sql_expression_op_parenthesis` - detailed description + of how the SQLAlchemy SQL compiler renders parenthesis :param is_comparison: legacy; if True, the operator will be considered as a "comparison" operator, that is which evaluates to a boolean From 322b9b4faa6dfd4eea3d2ba12825850318a59ef4 Mon Sep 17 00:00:00 2001 From: John Bodley <4567245+john-bodley@users.noreply.github.com> Date: Thu, 29 Sep 2022 21:58:58 -0400 Subject: [PATCH 387/632] adjust MySQL view reflection for non-standard MySQL variants Adjusted the regular expression used to match "CREATE VIEW" when testing for views to work more flexibly, no longer requiring the special keyword "ALGORITHM" in the middle, which was intended to be optional but was not working correctly. The change allows view reflection to work more completely on MySQL-compatible variants such as StarRocks. Pull request courtesy John Bodley. Fixes: #8588 Closes: #8589 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/8589 Pull-request-sha: d85b2c5b51e45cec543c9ae9d62d6d659b063354 Change-Id: I173137f0bf68639cad0d5c329055475b40ddb5e4 (cherry picked from commit 9829bc43d69ea5e714014f5ac5f036a94d13bc08) --- doc/build/changelog/unreleased_14/8588.rst | 10 ++++++++++ lib/sqlalchemy/dialects/mysql/base.py | 2 +- lib/sqlalchemy/dialects/mysql/reflection.py | 5 +++++ test/dialect/mysql/test_reflection.py | 17 +++++++++++++++-- 4 files changed, 31 insertions(+), 3 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8588.rst diff --git a/doc/build/changelog/unreleased_14/8588.rst b/doc/build/changelog/unreleased_14/8588.rst new file mode 100644 index 00000000000..879b8b29073 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8588.rst @@ -0,0 +1,10 @@ +.. 
change:: + :tags: bug, mysql + :tickets: 8588 + + Adjusted the regular expression used to match "CREATE VIEW" when + testing for views to work more flexibly, no longer requiring the + special keyword "ALGORITHM" in the middle, which was intended to be + optional but was not working correctly. The change allows view reflection + to work more completely on MySQL-compatible variants such as StarRocks. + Pull request courtesy John Bodley. \ No newline at end of file diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index 111c63bff16..70b60a0a0fe 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -3107,7 +3107,7 @@ def _setup_parser(self, connection, table_name, schema=None, **kw): sql = self._show_create_table( connection, None, charset, full_name=full_name ) - if re.match(r"^CREATE (?:ALGORITHM)?.* VIEW", sql): + if parser._check_view(sql): # Adapt views to something table-like. columns = self._describe_table( connection, None, charset, full_name=full_name diff --git a/lib/sqlalchemy/dialects/mysql/reflection.py b/lib/sqlalchemy/dialects/mysql/reflection.py index 27394bbe9fc..f536496d469 100644 --- a/lib/sqlalchemy/dialects/mysql/reflection.py +++ b/lib/sqlalchemy/dialects/mysql/reflection.py @@ -70,6 +70,9 @@ def parse(self, show_create, charset): pass return state + def _check_view(self, sql): + return bool(self._re_is_view.match(sql)) + def _parse_constraints(self, line): """Parse a KEY or CONSTRAINT line. @@ -349,6 +352,8 @@ def _prep_regexes(self): self.preparer._unescape_identifier, ) + self._re_is_view = _re_compile(r"^CREATE(?! 
TABLE)(\s.*)?\sVIEW") + # `col`,`col2`(32),`col3`(15) DESC # self._re_keyexprs = _re_compile( diff --git a/test/dialect/mysql/test_reflection.py b/test/dialect/mysql/test_reflection.py index 4c763a6483b..529d352a2ae 100644 --- a/test/dialect/mysql/test_reflection.py +++ b/test/dialect/mysql/test_reflection.py @@ -1122,8 +1122,6 @@ def test_case_sensitive_reflection_dual_case_references( class RawReflectionTest(fixtures.TestBase): - __backend__ = True - def setup_test(self): dialect = mysql.dialect() self.parser = _reflection.MySQLTableDefinitionParser( @@ -1249,3 +1247,18 @@ def test_fk_reflection(self): "SET NULL", ), ) + + @testing.combinations( + ( + "CREATE ALGORITHM=UNDEFINED DEFINER=`scott`@`%` " + "SQL SECURITY DEFINER VIEW `v1` AS SELECT", + True, + ), + ("CREATE VIEW `v1` AS SELECT", True), + ("CREATE TABLE `v1`", False), + ("CREATE TABLE `VIEW`", False), + ("CREATE TABLE `VIEW_THINGS`", False), + ("CREATE TABLE `A VIEW`", False), + ) + def test_is_view(self, sql, expected): + is_(self.parser._check_view(sql), expected) From be400435239644f9c51575dca9dbb409e08ceefc Mon Sep 17 00:00:00 2001 From: Jochen Kupperschmidt Date: Thu, 6 Oct 2022 21:27:02 +0200 Subject: [PATCH 388/632] Fix missing column name in 1.x `IS NULL` example (#8595) --- doc/build/core/tutorial.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/core/tutorial.rst b/doc/build/core/tutorial.rst index e0b3e179fd0..9ec74fead7f 100644 --- a/doc/build/core/tutorial.rst +++ b/doc/build/core/tutorial.rst @@ -822,7 +822,7 @@ objects is at :class:`.ColumnOperators`. * :meth:`IS NULL <.ColumnOperators.is_>`:: - statement.where(users.c. 
== None) + statement.where(users.c.name == None) # alternatively, if pep8/linters are a concern statement.where(users.c.name.is_(None)) From 5ea99059c6c5677bb90078a0075cb9c9d7de83a7 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Thu, 6 Oct 2022 22:48:21 +0200 Subject: [PATCH 389/632] Add format docs to pre-commits Also report changes from main to 1_4 Change-Id: Ia41399155ee0ec1b878aebf18967eabe38f5afd1 --- .pre-commit-config.yaml | 8 + doc/build/changelog/unreleased_14/8588.rst | 2 +- tools/format_docs_code.py | 287 ++++++++++----------- 3 files changed, 145 insertions(+), 152 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 91b12737486..a648d37d2d0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,3 +26,11 @@ repos: # in case it requires a version pin - pydocstyle - pygments + +- repo: local + hooks: + - id: black-docs + name: Format docs code block with black + entry: python tools/format_docs_code.py --report-doctest -f + language: system + types: [rst] diff --git a/doc/build/changelog/unreleased_14/8588.rst b/doc/build/changelog/unreleased_14/8588.rst index 879b8b29073..474c14c4fa0 100644 --- a/doc/build/changelog/unreleased_14/8588.rst +++ b/doc/build/changelog/unreleased_14/8588.rst @@ -7,4 +7,4 @@ special keyword "ALGORITHM" in the middle, which was intended to be optional but was not working correctly. The change allows view reflection to work more completely on MySQL-compatible variants such as StarRocks. - Pull request courtesy John Bodley. \ No newline at end of file + Pull request courtesy John Bodley. 
diff --git a/tools/format_docs_code.py b/tools/format_docs_code.py index 88e9288bc37..31a5b8e2ffa 100644 --- a/tools/format_docs_code.py +++ b/tools/format_docs_code.py @@ -1,8 +1,10 @@ from argparse import ArgumentParser from argparse import RawDescriptionHelpFormatter from collections.abc import Iterator +from functools import partial from pathlib import Path import re +from typing import NamedTuple from black import format_str from black.const import DEFAULT_LINE_LENGTH @@ -12,16 +14,18 @@ home = Path(__file__).parent.parent +ignore_paths = (re.compile(r"changelog/unreleased_\d{2}"),) -_Block = list[ - tuple[ - str, - int, - str | None, - str | None, - str, - ] -] + +class BlockLine(NamedTuple): + line: str + line_no: int + code: str + padding: str | None = None # relevant only on first line of block + sql_marker: str | None = None + + +_Block = list[BlockLine] def _format_block( @@ -29,44 +33,44 @@ def _format_block( exit_on_error: bool, errors: list[tuple[int, str, Exception]], is_doctest: bool, + file: str, ) -> list[str]: if not is_doctest: # The first line may have additional padding. 
Remove then restore later - add_padding = start_space.match(input_block[0][4]).groups()[0] + add_padding = start_space.match(input_block[0].code).groups()[0] skip = len(add_padding) code = "\n".join( - c[skip:] if c.startswith(add_padding) else c - for *_, c in input_block + l.code[skip:] if l.code.startswith(add_padding) else l.code + for l in input_block ) else: add_padding = None - code = "\n".join(c for *_, c in input_block) + code = "\n".join(l.code for l in input_block) try: formatted = format_str(code, mode=BLACK_MODE) except Exception as e: - start_line = input_block[0][1] - errors.append((start_line, code, e)) - if is_doctest: + start_line = input_block[0].line_no + first_error = not errors + if not REPORT_ONLY_DOCTEST or is_doctest: + type_ = "doctest" if is_doctest else "plain" + errors.append((start_line, code, e)) + if first_error: + print() # add newline print( - "Could not format code block starting at " - f"line {start_line}:\n{code}\nError: {e}" + f"--- {file}:{start_line} Could not format {type_} code " + f"block:\n{code}\n---Error: {e}" ) if exit_on_error: print("Exiting since --exit-on-error was passed") raise else: print("Ignoring error") - elif VERBOSE: - print( - "Could not format code block starting at " - f"line {start_line}:\n---\n{code}\n---Error: {e}" - ) - return [line for line, *_ in input_block] + return [l.line for l in input_block] else: formatted_code_lines = formatted.splitlines() - padding = input_block[0][2] - sql_prefix = input_block[0][3] or "" + padding = input_block[0].padding + sql_prefix = input_block[0].sql_marker or "" if is_doctest: formatted_lines = [ @@ -84,7 +88,7 @@ def _format_block( for fcl in formatted_code_lines[1:] ), ] - if not input_block[-1][0] and formatted_lines[-1]: + if not input_block[-1].line and formatted_lines[-1]: # last line was empty and black removed it. 
restore it formatted_lines.append("") return formatted_lines @@ -94,7 +98,8 @@ def _format_block( doctest_code_start = re.compile(r"^(\s+)({(?:opensql|sql|stop)})?>>>\s?(.+)") doctest_code_continue = re.compile(r"^\s+\.\.\.\s?(\s*.*)") -sql_code_start = re.compile(r"^(\s+){(?:open)?sql}") + +sql_code_start = re.compile(r"^(\s+)({(?:open)?sql})") sql_code_stop = re.compile(r"^(\s+){stop}") start_code_section = re.compile( @@ -104,7 +109,7 @@ def _format_block( def format_file( - file: Path, exit_on_error: bool, check: bool, no_plain: bool + file: Path, exit_on_error: bool, check: bool ) -> tuple[bool, int]: buffer = [] if not check: @@ -120,18 +125,44 @@ def format_file( errors = [] + do_doctest_format = partial( + _format_block, + exit_on_error=exit_on_error, + errors=errors, + is_doctest=True, + file=str(file), + ) + + def doctest_format(): + nonlocal doctest_block + if doctest_block: + buffer.extend(do_doctest_format(doctest_block)) + doctest_block = None + + do_plain_format = partial( + _format_block, + exit_on_error=exit_on_error, + errors=errors, + is_doctest=False, + file=str(file), + ) + + def plain_format(): + nonlocal plain_block + if plain_block: + buffer.extend(do_plain_format(plain_block)) + plain_block = None + disable_format = False for line_no, line in enumerate(original.splitlines(), 1): - # start_code_section requires no spaces at the start - if start_code_section.match(line.strip()): - if plain_block: - buffer.extend( - _format_block( - plain_block, exit_on_error, errors, is_doctest=False - ) - ) - plain_block = None + if ( + line + and not disable_format + and start_code_section.match(line.strip()) + ): + # start_code_section regexp requires no spaces at the start + plain_format() plain_code_section = True assert not sql_section plain_padding = start_space.match(line).groups()[0] @@ -145,22 +176,18 @@ def format_file( ): plain_code_section = sql_section = False elif match := format_directive.match(line): + assert not plain_code_section 
disable_format = match.groups()[0] == "off" if doctest_block: assert not plain_block if match := doctest_code_continue.match(line): doctest_block.append( - (line, line_no, None, None, match.groups()[0]) + BlockLine(line, line_no, match.groups()[0]) ) continue else: - buffer.extend( - _format_block( - doctest_block, exit_on_error, errors, is_doctest=True - ) - ) - doctest_block = None + doctest_format() elif plain_block: if ( plain_code_section @@ -168,87 +195,62 @@ def format_file( and not sql_code_start.match(line) ): plain_block.append( - (line, line_no, None, None, line[plain_padding_len:]) + BlockLine(line, line_no, line[plain_padding_len:]) ) continue else: - buffer.extend( - _format_block( - plain_block, exit_on_error, errors, is_doctest=False - ) - ) - plain_block = None + plain_format() if line and (match := doctest_code_start.match(line)): + # the line is in a doctest plain_code_section = sql_section = False - if plain_block: - buffer.extend( - _format_block( - plain_block, exit_on_error, errors, is_doctest=False - ) - ) - plain_block = None - padding, code = match.group(1, 3) - doctest_block = [(line, line_no, padding, match.group(2), code)] - elif ( - line - and plain_code_section - and (match := sql_code_start.match(line)) - ): - if plain_block: - buffer.extend( - _format_block( - plain_block, exit_on_error, errors, is_doctest=False - ) - ) - plain_block = None - - sql_section = True - buffer.append(line) - elif line and sql_section and (match := sql_code_stop.match(line)): - sql_section = False - line = line.replace("{stop}", "") + plain_format() + padding, sql_marker, code = match.groups() + doctest_block = [ + BlockLine(line, line_no, code, padding, sql_marker) + ] + elif line and plain_code_section: + assert not disable_format assert not doctest_block - # start of a plain block - if line.strip(): + if match := sql_code_start.match(line): + plain_format() + sql_section = True + buffer.append(line) + elif sql_section: + if match := 
sql_code_stop.match(line): + sql_section = False + no_stop_line = line.replace("{stop}", "") + # start of a plain block + if no_stop_line.strip(): + assert not plain_block + plain_block = [ + BlockLine( + line, + line_no, + no_stop_line[plain_padding_len:], + plain_padding, + "{stop}", + ) + ] + continue + buffer.append(line) + else: + # start of a plain block + assert not doctest_block plain_block = [ - ( + BlockLine( line, line_no, - plain_padding, - "{stop}", line[plain_padding_len:], + plain_padding, ) ] - - elif ( - line - and not no_plain - and not disable_format - and plain_code_section - and not sql_section - ): - assert not doctest_block - # start of a plain block - plain_block = [ - (line, line_no, plain_padding, None, line[plain_padding_len:]) - ] else: buffer.append(line) - if doctest_block: - buffer.extend( - _format_block( - doctest_block, exit_on_error, errors, is_doctest=True - ) - ) - if plain_block: - buffer.extend( - _format_block(plain_block, exit_on_error, errors, is_doctest=False) - ) + doctest_format() + plain_format() if buffer: - # if there is nothing in the buffer something strange happened so - # don't do anything buffer.append("") updated = "\n".join(buffer) equal = original == updated @@ -261,6 +263,8 @@ def format_file( # write only if there are changes to write file.write_text(updated, "utf-8", newline="\n") else: + # if there is nothing in the buffer something strange happened so + # don't do anything if not check: print(".. 
Nothing to write") equal = bool(original) is False @@ -271,22 +275,20 @@ def format_file( return equal, len(errors) -def iter_files(directory) -> Iterator[Path]: - yield from (home / directory).glob("./**/*.rst") +def iter_files(directory: str) -> Iterator[Path]: + yield from ( + file + for file in (home / directory).glob("./**/*.rst") + if not any(pattern.search(file.as_posix()) for pattern in ignore_paths) + ) -def main( - file: str | None, - directory: str, - exit_on_error: bool, - check: bool, - no_plain: bool, -): +def main(file: str | None, directory: str, exit_on_error: bool, check: bool): if file is not None: - result = [format_file(Path(file), exit_on_error, check, no_plain)] + result = [format_file(Path(file), exit_on_error, check)] else: result = [ - format_file(doc, exit_on_error, check, no_plain) + format_file(doc, exit_on_error, check) for doc in iter_files(directory) ] @@ -308,22 +310,19 @@ def main( else "no formatting errors reported", ) - # interim, until we fix all formatting errors - if not to_reformat: - exit(0) exit(1) if __name__ == "__main__": parser = ArgumentParser( description="""Formats code inside docs using black. Supports \ -doctest code blocks and also tries to format plain code block identifies as \ -all indented blocks of at least 4 spaces, unless '--no-plain' is specified. +doctest code blocks and plain code block identified as indented sections \ +that are preceded by ``::`` or ``.. sourcecode:: py``. + +To disable formatting on a file section the comment ``.. format: off`` \ +disables formatting until ``.. format: on`` is encountered or the file ends. -Plain code block may lead to false positive. To disable formatting on a \ -file section the comment ``.. format: off`` disables formatting until \ -``.. format: on`` is encountered or the file ends. -Another alterative is to use less than 4 spaces to indent the code block. +Use --report-doctest to ignore errors on plain code blocks. 
""", formatter_class=RawDescriptionHelpFormatter, ) @@ -341,14 +340,13 @@ def main( "--check", help="Don't write the files back, just return the " "status. Return code 0 means nothing would change. " - "Return code 1 means some files would be reformatted.", + "Return code 1 means some files would be reformatted", action="store_true", ) parser.add_argument( "-e", "--exit-on-error", - help="Exit in case of black format error instead of ignoring it. " - "This option is only valid for doctest code blocks", + help="Exit in case of black format error instead of ignoring it", action="store_true", ) parser.add_argument( @@ -359,16 +357,9 @@ def main( action="store_true", ) parser.add_argument( - "-v", - "--verbose", - help="Increase verbosity", - action="store_true", - ) - parser.add_argument( - "-n", - "--no-plain", - help="Disable plain code blocks formatting that's more difficult " - "to parse compared to doctest code blocks", + "-rd", "--report-doctest", + help="Report errors only when running doctest blocks. When active " + "exit-on-error will be valid only on doctest blocks", action="store_true", ) args = parser.parse_args() @@ -384,12 +375,6 @@ def main( if args.project_line_length else DEFAULT_LINE_LENGTH, ) - VERBOSE = args.verbose - - main( - args.file, - args.directory, - args.exit_on_error, - args.check, - args.no_plain, - ) + REPORT_ONLY_DOCTEST = args.report_doctest + + main(args.file, args.directory, args.exit_on_error, args.check) From 41df10db65ef5dfd5d04644e5f447908dad33cb8 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 7 Oct 2022 11:25:08 -0400 Subject: [PATCH 390/632] dont mutate bind_arguments incoming dictionary The :paramref:`_orm.Session.execute.bind_arguments` dictionary is no longer mutated when passed to :meth:`_orm.Session.execute` and similar; instead, it's copied to an internal dictionary for state changes. 
Among other things, this fixes an issue where the "clause" passed to the :meth:`_orm.Session.get_bind` method would be incorrectly referring to the :class:`_sql.Select` construct used for the "fetch" synchronization strategy, when the actual query being emitted was a :class:`_dml.Delete` or :class:`_dml.Update`. This would interfere with recipes for "routing sessions". Fixes: #8614 Change-Id: I8d237449485c9bbf41db2b29a34b6136aa43b7bc (cherry picked from commit 3efc9e1df378be8046d4b1f1b624968a62eb100f) --- doc/build/changelog/unreleased_14/8614.rst | 13 ++++++++ lib/sqlalchemy/orm/session.py | 2 ++ test/orm/test_bind.py | 22 ++++++++++++ test/orm/test_update_delete.py | 39 ++++++++++++++++++++++ 4 files changed, 76 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/8614.rst diff --git a/doc/build/changelog/unreleased_14/8614.rst b/doc/build/changelog/unreleased_14/8614.rst new file mode 100644 index 00000000000..b975dbc1f96 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8614.rst @@ -0,0 +1,13 @@ +.. change:: + :tags: bug, orm + :tickets: 8614 + + The :paramref:`_orm.Session.execute.bind_arguments` dictionary is no longer + mutated when passed to :meth:`_orm.Session.execute` and similar; instead, + it's copied to an internal dictionary for state changes. Among other + things, this fixes an issue where the "clause" passed to the + :meth:`_orm.Session.get_bind` method would be incorrectly referring to the + :class:`_sql.Select` construct used for the "fetch" synchronization + strategy, when the actual query being emitted was a :class:`_dml.Delete` or + :class:`_dml.Update`. This would interfere with recipes for "routing + sessions".
diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index 4b05381db20..79b723184d3 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -1639,6 +1639,8 @@ def execute( bind_arguments.update(kw) elif not bind_arguments: bind_arguments = {} + else: + bind_arguments = dict(bind_arguments) if ( statement._propagate_attrs.get("compile_state_plugin", None) diff --git a/test/orm/test_bind.py b/test/orm/test_bind.py index 6326e0c4dca..802de996969 100644 --- a/test/orm/test_bind.py +++ b/test/orm/test_bind.py @@ -290,6 +290,28 @@ def test_get_bind(self, testcase, expected): sess.close() + @testing.combinations(True, False) + def test_dont_mutate_binds(self, empty_dict): + users, User = ( + self.tables.users, + self.classes.User, + ) + + mp = self.mapper_registry.map_imperatively(User, users) + + sess = fixture_session() + + if empty_dict: + bind_arguments = {} + else: + bind_arguments = {"mapper": mp} + sess.execute(select(1), bind_arguments=bind_arguments) + + if empty_dict: + eq_(bind_arguments, {}) + else: + eq_(bind_arguments, {"mapper": mp}) + @testing.combinations( ( lambda session, Address: session.query(Address).statement, diff --git a/test/orm/test_update_delete.py b/test/orm/test_update_delete.py index 4eabe2f6c49..6be271e4603 100644 --- a/test/orm/test_update_delete.py +++ b/test/orm/test_update_delete.py @@ -22,6 +22,9 @@ from sqlalchemy.orm import sessionmaker from sqlalchemy.orm import synonym from sqlalchemy.orm import with_loader_criteria +from sqlalchemy.sql.dml import Delete +from sqlalchemy.sql.dml import Update +from sqlalchemy.sql.selectable import Select from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import eq_ @@ -1460,6 +1463,42 @@ def test_update_preserve_parameter_order_future(self): ] eq_(["name", "age_int"], cols) + @testing.combinations(("update",), ("delete",), argnames="stmt_type") + @testing.combinations( + 
("evaluate",), ("fetch",), (None,), argnames="sync_type" + ) + def test_routing_session(self, stmt_type, sync_type, connection): + User = self.classes.User + + if stmt_type == "update": + stmt = update(User).values(age=123) + expected = [Update] + elif stmt_type == "delete": + stmt = delete(User) + expected = [Delete] + else: + assert False + + received = [] + + class RoutingSession(Session): + def get_bind(self, **kw): + received.append(type(kw["clause"])) + return super(RoutingSession, self).get_bind(**kw) + + stmt = stmt.execution_options(synchronize_session=sync_type) + + if sync_type == "fetch": + expected.insert(0, Select) + + if not connection.dialect.full_returning: + expected.insert(0, Select) + + with RoutingSession(bind=connection) as sess: + sess.execute(stmt) + + eq_(received, expected) + class UpdateDeleteIgnoresLoadersTest(fixtures.MappedTest): @classmethod From a84f474051cae710e33b3d9486194ed534fe0167 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 10 Oct 2022 10:42:59 -0400 Subject: [PATCH 391/632] remove redundant paragraph backported from main; see comment at https://gerrit.sqlalchemy.org/c/sqlalchemy/sqlalchemy/+/4042/52..61/doc/build/orm/queryguide/api.rst#239 Change-Id: I214784ef8f6ff523c3cc55df2318a85d45a0690d --- doc/build/orm/queryguide.rst | 5 ----- 1 file changed, 5 deletions(-) diff --git a/doc/build/orm/queryguide.rst b/doc/build/orm/queryguide.rst index 70254234e45..3da22ebd264 100644 --- a/doc/build/orm/queryguide.rst +++ b/doc/build/orm/queryguide.rst @@ -1092,11 +1092,6 @@ partitions. The size of each partition defaults to the integer value passed to (User(id=1, name='spongebob', fullname='Spongebob Squarepants'),) ... -When ``yield_per`` is used, the -:paramref:`_engine.Connection.execution_options.stream_results` option is also -set for the Core execution, so that a streaming / server side cursor will be -used if the backend supports it. 
- The ``yield_per`` execution option **is not compatible** with :ref:`"subquery" eager loading ` loading or :ref:`"joined" eager loading ` when using collections. It From 855472e963bcebe426f1fb89e1b82947e4f5208f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 10 Oct 2022 14:03:04 -0400 Subject: [PATCH 392/632] warn for no polymorphic identity w/ poly hierarchy A warning is emitted when attempting to configure a mapped class within an inheritance hierarchy where the mapper is not given any polymorphic identity, however there is a polymorphic discriminator column assigned. Such classes should be abstract if they never intend to load directly. Fixes: #7545 Change-Id: I94f04e59736c73e3f39d883a75d763e3f06ecc3d (cherry picked from commit bf0634131115a76aaca52eebd3c7d3fb52f8258b) --- doc/build/changelog/unreleased_14/7545.rst | 9 +++++++ lib/sqlalchemy/orm/mapper.py | 23 ++++++++++++++--- test/orm/declarative/test_inheritance.py | 10 ++++++++ test/orm/inheritance/test_basic.py | 30 +++++++++++++++++++++- test/orm/test_events.py | 1 + 5 files changed, 69 insertions(+), 4 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/7545.rst diff --git a/doc/build/changelog/unreleased_14/7545.rst b/doc/build/changelog/unreleased_14/7545.rst new file mode 100644 index 00000000000..ea31d1a3c3f --- /dev/null +++ b/doc/build/changelog/unreleased_14/7545.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, orm + :tickets: 7545 + + A warning is emitted when attempting to configure a mapped class within an + inheritance hierarchy where the mapper is not given any polymorphic + identity, however there is a polymorphic discriminator column assigned. + Such classes should be abstract if they never intend to load directly. 
+ diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index 2554dde0de8..15eae2dac09 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -1079,10 +1079,27 @@ def _configure_inheritance(self): else: self.persist_selectable = self.local_table - if self.polymorphic_identity is not None and not self.concrete: - self._identity_class = self.inherits._identity_class - else: + if self.polymorphic_identity is None: + self._identity_class = self.class_ + + if self.inherits.base_mapper.polymorphic_on is not None: + util.warn( + "Mapper %s does not indicate a polymorphic_identity, " + "yet is part of an inheritance hierarchy that has a " + "polymorphic_on column of '%s'. Objects of this type " + "cannot be loaded polymorphically which can lead to " + "degraded or incorrect loading behavior in some " + "scenarios. Please establish a polmorphic_identity " + "for this class, or leave it un-mapped. " + "To omit mapping an intermediary class when using " + "declarative, set the '__abstract__ = True' " + "attribute on that class." 
+ % (self, self.inherits.base_mapper.polymorphic_on) + ) + elif self.concrete: self._identity_class = self.class_ + else: + self._identity_class = self.inherits._identity_class if self.version_id_col is None: self.version_id_col = self.inherits.version_id_col diff --git a/test/orm/declarative/test_inheritance.py b/test/orm/declarative/test_inheritance.py index 7e43e255954..7f1b47f3758 100644 --- a/test/orm/declarative/test_inheritance.py +++ b/test/orm/declarative/test_inheritance.py @@ -38,6 +38,7 @@ def teardown_test(self): class DeclarativeInheritanceTest(DeclarativeTestBase): + @testing.emits_warning(r".*does not indicate a polymorphic_identity") def test_we_must_copy_mapper_args(self): class Person(Base): @@ -673,6 +674,9 @@ class Employee(Person): __tablename__ = "employee" id = Column(Integer, ForeignKey(Person.id), primary_key=True) + __mapper_args__ = { + "polymorphic_identity": "employee", + } class Engineer(Employee): __mapper_args__ = {"polymorphic_identity": "engineer"} @@ -1007,9 +1011,15 @@ class Manager(Person): __mapper_args__ = {"polymorphic_identity": "manager"} id = Column(Integer, ForeignKey("people.id"), primary_key=True) golf_swing = Column(String(50)) + __mapper_args__ = { + "polymorphic_identity": "manager", + } class Boss(Manager): boss_name = Column(String(50)) + __mapper_args__ = { + "polymorphic_identity": "boss", + } is_( Boss.__mapper__.column_attrs["boss_name"].columns[0], diff --git a/test/orm/inheritance/test_basic.py b/test/orm/inheritance/test_basic.py index 6285a80a7f3..9daafb7cefb 100644 --- a/test/orm/inheritance/test_basic.py +++ b/test/orm/inheritance/test_basic.py @@ -32,6 +32,7 @@ from sqlalchemy.testing import assert_raises from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_warnings from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import mock @@ -3414,7 +3415,12 @@ def setup_mappers(cls): 
cls.mapper_registry.map_imperatively( A, base, polymorphic_on=base.c.type ) - cls.mapper_registry.map_imperatively(B, inherits=A) + + with expect_warnings( + r"Mapper mapped class B->base does not indicate a " + "polymorphic_identity," + ): + cls.mapper_registry.map_imperatively(B, inherits=A) cls.mapper_registry.map_imperatively( C, inherits=B, polymorphic_identity="c" ) @@ -3424,6 +3430,28 @@ def setup_mappers(cls): cls.mapper_registry.map_imperatively( E, inherits=A, polymorphic_identity="e" ) + cls.mapper_registry.configure() + + def test_warning(self, decl_base): + """test #7545""" + + class A(decl_base): + __tablename__ = "a" + id = Column(Integer, primary_key=True) + type = Column(String) + + __mapper_args__ = {"polymorphic_on": type} + + class B(A): + __mapper_args__ = {"polymorphic_identity": "b"} + + with expect_warnings( + r"Mapper mapped class C->a does not indicate a " + "polymorphic_identity," + ): + + class C(A): + __mapper_args__ = {} def test_load_from_middle(self): C, B = self.classes.C, self.classes.B diff --git a/test/orm/test_events.py b/test/orm/test_events.py index 50265510042..efb39bd2fdc 100644 --- a/test/orm/test_events.py +++ b/test/orm/test_events.py @@ -1209,6 +1209,7 @@ class Animal(AnotherBase): # not been loaded yet (Employer), and therefore cannot be configured: class Mammal(Animal): nonexistent = relationship("Nonexistent") + __mapper_args__ = {"polymorphic_identity": "mammal"} # These new classes should not be configured at this point: unconfigured = list(mapperlib._unconfigured_mappers()) From 54f24fdc8e533a7d665048fa3dd5e46edbb80aec Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 11 Oct 2022 15:32:47 -0400 Subject: [PATCH 393/632] support multiple files to work correctly w/ pre-commit Change-Id: I7ddf1848b96105701b733306353ae949a4579339 --- tools/format_docs_code.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/tools/format_docs_code.py b/tools/format_docs_code.py index 
31a5b8e2ffa..05e5e01f10a 100644 --- a/tools/format_docs_code.py +++ b/tools/format_docs_code.py @@ -283,9 +283,11 @@ def iter_files(directory: str) -> Iterator[Path]: ) -def main(file: str | None, directory: str, exit_on_error: bool, check: bool): +def main( + file: list[str] | None, directory: str, exit_on_error: bool, check: bool +): if file is not None: - result = [format_file(Path(file), exit_on_error, check)] + result = [format_file(Path(f), exit_on_error, check) for f in file] else: result = [ format_file(doc, exit_on_error, check) @@ -327,7 +329,10 @@ def main(file: str | None, directory: str, exit_on_error: bool, check: bool): formatter_class=RawDescriptionHelpFormatter, ) parser.add_argument( - "-f", "--file", help="Format only this file instead of all docs" + "-f", + "--file", + help="Format only this file instead of all docs", + nargs="+", ) parser.add_argument( "-d", @@ -357,7 +362,8 @@ def main(file: str | None, directory: str, exit_on_error: bool, check: bool): action="store_true", ) parser.add_argument( - "-rd", "--report-doctest", + "-rd", + "--report-doctest", help="Report errors only when running doctest blocks. 
When active " "exit-on-error will be valid only on doctest blocks", action="store_true", From 9d81c792c354f4a01d64bcb44ad3a8c3eeae931d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 14 Oct 2022 09:17:09 -0400 Subject: [PATCH 394/632] narrow formatting in table, turn format off Change-Id: I0824495e0582657ffb63eaa2466021f56005c81c References: https://github.com/sqlalchemy/sqlalchemy/discussions/8157#discussioncomment-3878806 (cherry picked from commit 1985cb0f48b298ffc445b052cd62a8d4a81f4e10) --- doc/build/changelog/migration_10.rst | 2 +- doc/build/changelog/migration_20.rst | 116 ++++++++++++++++++++------- doc/build/orm/declarative_config.rst | 6 +- 3 files changed, 93 insertions(+), 31 deletions(-) diff --git a/doc/build/changelog/migration_10.rst b/doc/build/changelog/migration_10.rst index 2ff86415015..0c5f9187dce 100644 --- a/doc/build/changelog/migration_10.rst +++ b/doc/build/changelog/migration_10.rst @@ -181,7 +181,7 @@ applied:: class MySubClass(MyClass): - """""" + """ """ # ... diff --git a/doc/build/changelog/migration_20.rst b/doc/build/changelog/migration_20.rst index 105108434f6..d0d92c0b968 100644 --- a/doc/build/changelog/migration_20.rst +++ b/doc/build/changelog/migration_20.rst @@ -1156,6 +1156,7 @@ calling form with links to documentation for each technique presented. The individual migration notes are in the embedded sections following the table, and may include additional notes not summarized here. +.. format: off .. container:: sliding-table @@ -1182,9 +1183,15 @@ following the table, and may include additional notes not summarized here. - :: - session.execute(select(User)).scalars().all() + session.execute( + select(User) + ).scalars().all() + # or - session.scalars(select(User)).all() + + session.scalars( + select(User) + ).all() - :ref:`migration_20_unify_select` @@ -1193,11 +1200,16 @@ following the table, and may include additional notes not summarized here. 
* - :: - session.query(User).filter_by(name="some user").one() + session.query(User).\ + filter_by(name="some user").\ + one() - :: - session.execute(select(User).filter_by(name="some user")).scalar_one() + session.execute( + select(User). + filter_by(name="some user") + ).scalar_one() - :ref:`migration_20_unify_select` @@ -1205,11 +1217,17 @@ following the table, and may include additional notes not summarized here. * - :: - session.query(User).filter_by(name="some user").first() + session.query(User).\ + filter_by(name="some user").\ + first() - :: - session.scalars(select(User).filter_by(name="some user").limit(1)).first() + session.scalars( + select(User). + filter_by(name="some user"). + limit(1) + ).first() - :ref:`migration_20_unify_select` @@ -1217,22 +1235,38 @@ following the table, and may include additional notes not summarized here. * - :: - session.query(User).options(joinedload(User.addresses)).all() + session.query(User).options( + joinedload(User.addresses) + ).all() - :: - session.scalars(select(User).options(joinedload(User.addresses))).unique().all() + session.scalars( + select(User). + options( + joinedload(User.addresses) + ) + ).unique().all() - :ref:`joinedload_not_uniqued` * - :: - session.query(User).join(Address).filter(Address.email == "e@sa.us").all() + session.query(User).\ + join(Address).\ + filter( + Address.email == "e@sa.us" + ).\ + all() - :: session.execute( - select(User).join(Address).where(Address.email == "e@sa.us") + select(User). + join(Address). + where( + Address.email == "e@sa.us" + ) ).scalars().all() - :ref:`migration_20_unify_select` @@ -1241,27 +1275,43 @@ following the table, and may include additional notes not summarized here. * - :: - session.query(User).from_statement(text("select * from users")).all() + session.query(User).\ + from_statement( + text("select * from users") + ).\ + all() - :: - session.scalars(select(User).from_statement(text("select * from users"))).all() + session.scalars( + select(User). 
+ from_statement( + text("select * from users") + ) + ).all() - :ref:`orm_queryguide_selecting_text` * - :: - session.query(User).join(User.addresses).options( + session.query(User).\ + join(User.addresses).\ + options( contains_eager(User.addresses) - ).populate_existing().all() + ).\ + populate_existing().all() - :: session.execute( - select(User) - .join(User.addresses) - .options(contains_eager(User.addresses)) - .execution_options(populate_existing=True) + select(User) + .join(User.addresses) + .options( + contains_eager(User.addresses) + ) + .execution_options( + populate_existing=True + ) ).scalars().all() - @@ -1273,17 +1323,22 @@ following the table, and may include additional notes not summarized here. * - :: - session.query(User).filter(User.name == "foo").update( - {"fullname": "Foo Bar"}, synchronize_session="evaluate" - ) + session.query(User).\ + filter(User.name == "foo").\ + update( + {"fullname": "Foo Bar"}, + synchronize_session="evaluate" + ) - :: session.execute( - update(User) - .where(User.name == "foo") - .values(fullname="Foo Bar") - .execution_options(synchronize_session="evaluate") + update(User) + .where(User.name == "foo") + .values(fullname="Foo Bar") + .execution_options( + synchronize_session="evaluate" + ) ) - :ref:`orm_expression_update_delete` @@ -1295,11 +1350,18 @@ following the table, and may include additional notes not summarized here. - :: - session.scalar(select(func.count()).select_from(User)) - session.scalar(select(func.count(User.id))) + session.scalar( + select(func.count()). + select_from(User) + ) + session.scalar( + select(func.count(User.id)) + ) - :meth:`_orm.Session.scalar` +.. format: on + .. 
_migration_20_unify_select: ORM Query Unified with Core Select diff --git a/doc/build/orm/declarative_config.rst b/doc/build/orm/declarative_config.rst index 3a811ed82ec..9f031bd6e1d 100644 --- a/doc/build/orm/declarative_config.rst +++ b/doc/build/orm/declarative_config.rst @@ -320,7 +320,7 @@ assumed to be completed and the 'configure' step has finished:: class MyClass(Base): @classmethod def __declare_last__(cls): - """""" + """ """ # do something with mappings ``__declare_first__()`` @@ -332,7 +332,7 @@ configuration via the :meth:`.MapperEvents.before_configured` event:: class MyClass(Base): @classmethod def __declare_first__(cls): - """""" + """ """ # do something before mappings are configured .. versionadded:: 0.9.3 @@ -423,7 +423,7 @@ subclasses to extend just from the special class:: __abstract__ = True def some_helpful_method(self): - """""" + """ """ @declared_attr def __mapper_args__(cls): From b633102fe79862155be58e4466cc999da6560ed9 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 14 Oct 2022 11:33:54 -0400 Subject: [PATCH 395/632] correct python compat statement 2.0 uses 3.7 at a minimum, update verbiage here. Change-Id: I076e205bbfc9b502f1ac382f6292c290c3fc8c1b (cherry picked from commit cb0dd78f584cea4f169a47a2f4660300f4a6e081) --- doc/build/changelog/migration_20.rst | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/doc/build/changelog/migration_20.rst b/doc/build/changelog/migration_20.rst index d0d92c0b968..4e738e948e4 100644 --- a/doc/build/changelog/migration_20.rst +++ b/doc/build/changelog/migration_20.rst @@ -117,21 +117,16 @@ as being in this realm are as follows: For the full overview of SQLAlchemy 1.4 changes, see the :doc:`/changelog/migration_14` document. -Migration to 2.0 Step One - Python 3 only (Python 3.6 minimum) --------------------------------------------------------------- - -SQLAlchemy 2.0 was first inspired by the fact that Python 2's EOL was in -2020. 
SQLAlchemy is taking a longer period of time than other major -projects to drop Python 2.7 support, since it is not too much in the way -of things for the moment. However, version 2.0 hopes to start embracing -:pep:`484` and other new features to a great degree, so it is likely -that release 1.4 will be the last Python 2 supporting version, even if -there is a SQLAlchemy 1.5 (which is also unlikely at the moment). - -In order to use SQLAlchemy 2.0, the application will need to be runnable on -at least **Python 3.6** as of this writing. SQLAlchemy 1.4 now supports -Python 3.6 or newer within the Python 3 series; throughout the 1.4 series, -the application can remain running on Python 2.7 or on at least Python 3.6. +Migration to 2.0 Step One - Python 3 only (Python 3.7 minimum for 2.0 compatibility) +------------------------------------------------------------------------------------ + +SQLAlchemy 2.0 was first inspired by the fact that Python 2's EOL was in 2020. +SQLAlchemy is taking a longer period of time than other major projects to drop +Python 2.7 support. However, in order to use SQLAlchemy 2.0, the application +will need to be runnable on at least **Python 3.7**. SQLAlchemy 1.4 supports +Python 3.6 or newer within the Python 3 series; throughout the 1.4 series, the +application can remain running on Python 2.7 or on at least Python 3.6. Version +2.0 however starts at Python 3.7. .. _migration_20_deprecations_mode: From 5d45f5eb750e73a1e77175041f2c5751e93b3c57 Mon Sep 17 00:00:00 2001 From: Muhammad Abdur Rakib <103581704+rifatrakib@users.noreply.github.com> Date: Fri, 14 Oct 2022 23:53:49 +0600 Subject: [PATCH 396/632] fix instances of objects as instances of classes (#8627) document mentions `creating instances of "User" and "Address" objects` which is a mistake as we create instances or objects from classes. 
(cherry picked from commit fe89ffe7563192a0b8f83b045f6e35fbed2a4a19) --- doc/build/orm/quickstart.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/orm/quickstart.rst b/doc/build/orm/quickstart.rst index f1240e7bd83..d766ef1d77b 100644 --- a/doc/build/orm/quickstart.rst +++ b/doc/build/orm/quickstart.rst @@ -163,7 +163,7 @@ Create Objects and Persist --------------------------- We are now ready to insert data in the database. We accomplish this by -creating instances of ``User`` and ``Address`` objects, which have +creating instances of ``User`` and ``Address`` classes, which have an ``__init__()`` method already as established automatically by the declarative mapping process. We then pass them to the database using an object called a :ref:`Session `, From 1a29f86e6591628cd0de189bcf624d88995fd486 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 15 Oct 2022 11:12:25 -0400 Subject: [PATCH 397/632] disable isort in pyproject.toml disable isort, for IDEs that just default isort to be turned on, e.g. vscode. we use flake8-import-order for import sorting, using zimports to actually reformat code. isort is nicer in many ways but doesn't have our "import *" fixer and also is not 100% compatible with flake8-import-order. Change-Id: I8e53d475cdc1d6178e2c9276d2b21d47be207ede (cherry picked from commit e8da50ce0f0474bc89cee15603931760cb6c55ce) --- pyproject.toml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index f82dbd468b3..da6c4cba069 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,4 +6,11 @@ target-version = ['py27', 'py36'] black-line-length = 79 keep-unused-type-checking = true +# disable isort, for IDEs that just default isort to be turned on, e.g. vscode. +# we use flake8-import-order for import sorting, using zimports to actually +# reformat code. isort is nicer in many ways but doesn't have our +# "import *" fixer and also is not 100% compatible with flake8-import-order. 
+[tool.isort] +skip_glob=['*'] + From cf30cb414c9af770cf6addd940ea76391701be58 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 16 Oct 2022 10:09:20 -0400 Subject: [PATCH 398/632] add python 3.11 to supported versions Change-Id: Ibf699297b12c1c72c570db380282e97adfdef2b9 (cherry picked from commit 7fd8898b10669c8e6b08c5d66c92bdc975aaa353) --- setup.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.cfg b/setup.cfg index 8ec3ffa7948..45859cb6ccc 100644 --- a/setup.cfg +++ b/setup.cfg @@ -26,6 +26,7 @@ classifiers = Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 + Programming Language :: Python :: 3.11 Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: Implementation :: PyPy Topic :: Database :: Front-Ends From 8aa07c8e62a0483e0c882ffbf97453d5e4614efe Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 16 Oct 2022 10:12:34 -0400 Subject: [PATCH 399/632] - 1.4.42 --- doc/build/changelog/changelog_14.rst | 111 ++++++++++++++++++++- doc/build/changelog/unreleased_14/7094.rst | 9 -- doc/build/changelog/unreleased_14/7545.rst | 9 -- doc/build/changelog/unreleased_14/8507.rst | 13 --- doc/build/changelog/unreleased_14/8516.rst | 9 -- doc/build/changelog/unreleased_14/8525.rst | 10 -- doc/build/changelog/unreleased_14/8536.rst | 8 -- doc/build/changelog/unreleased_14/8569.rst | 13 --- doc/build/changelog/unreleased_14/8574.rst | 5 - doc/build/changelog/unreleased_14/8588.rst | 10 -- doc/build/changelog/unreleased_14/8614.rst | 13 --- doc/build/conf.py | 4 +- 12 files changed, 112 insertions(+), 102 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/7094.rst delete mode 100644 doc/build/changelog/unreleased_14/7545.rst delete mode 100644 doc/build/changelog/unreleased_14/8507.rst delete mode 100644 doc/build/changelog/unreleased_14/8516.rst delete mode 100644 doc/build/changelog/unreleased_14/8525.rst delete mode 100644 
doc/build/changelog/unreleased_14/8536.rst delete mode 100644 doc/build/changelog/unreleased_14/8569.rst delete mode 100644 doc/build/changelog/unreleased_14/8574.rst delete mode 100644 doc/build/changelog/unreleased_14/8588.rst delete mode 100644 doc/build/changelog/unreleased_14/8614.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 5a267ebd0d4..f82d623a208 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,116 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.42 - :include_notes_from: unreleased_14 + :released: October 16, 2022 + + .. change:: + :tags: bug, asyncio + :tickets: 8516 + + Improved implementation of ``asyncio.shield()`` used in context managers as + added in :ticket:`8145`, such that the "close" operation is enclosed within + an ``asyncio.Task`` which is then strongly referenced as the operation + proceeds. This is per Python documentation indicating that the task is + otherwise not strongly referenced. + + .. change:: + :tags: bug, orm + :tickets: 8614 + + The :paramref:`_orm.Session.execute.bind_arguments` dictionary is no longer + mutated when passed to :meth:`_orm.Session.execute` and similar; instead, + it's copied to an internal dictionary for state changes. Among other + things, this fixes and issue where the "clause" passed to the + :meth:`_orm.Session.get_bind` method would be incorrectly referring to the + :class:`_sql.Select` construct used for the "fetch" synchronization + strategy, when the actual query being emitted was a :class:`_dml.Delete` or + :class:`_dml.Update`. This would interfere with recipes for "routing + sessions". + + .. 
change:: + :tags: bug, orm + :tickets: 7094 + + A warning is emitted in ORM configurations when an explicit + :func:`_orm.remote` annotation is applied to columns that are local to the + immediate mapped class, when the referenced class does not include any of + the same table columns. Ideally this would raise an error at some point as + it's not correct from a mapping point of view. + + .. change:: + :tags: bug, orm + :tickets: 7545 + + A warning is emitted when attempting to configure a mapped class within an + inheritance hierarchy where the mapper is not given any polymorphic + identity, however there is a polymorphic discriminator column assigned. + Such classes should be abstract if they never intend to load directly. + + + .. change:: + :tags: bug, mssql, regression + :tickets: 8525 + + Fixed yet another regression in SQL Server isolation level fetch (see + :ticket:`8231`, :ticket:`8475`), this time with "Microsoft Dynamics CRM + Database via Azure Active Directory", which apparently lacks the + ``system_views`` view entirely. Error catching has been extended that under + no circumstances will this method ever fail, provided database connectivity + is present. + + .. change:: + :tags: orm, bug, regression + :tickets: 8569 + + Fixed regression for 1.4 in :func:`_orm.contains_eager` where the "wrap in + subquery" logic of :func:`_orm.joinedload` would be inadvertently triggered + for use of the :func:`_orm.contains_eager` function with similar statements + (e.g. those that use ``distinct()``, ``limit()`` or ``offset()``), which + would then lead to secondary issues with queries that used some + combinations of SQL label names and aliasing. This "wrapping" is not + appropriate for :func:`_orm.contains_eager` which has always had the + contract that the user-defined SQL statement is unmodified with the + exception of adding the appropriate columns to be fetched. + + .. 
change:: + :tags: bug, orm, regression + :tickets: 8507 + + Fixed regression where using ORM update() with synchronize_session='fetch' + would fail due to the use of evaluators that are now used to determine the + in-Python value for expressions in the the SET clause when refreshing + objects; if the evaluators make use of math operators against non-numeric + values such as PostgreSQL JSONB, the non-evaluable condition would fail to + be detected correctly. The evaluator now limits the use of math mutation + operators to numeric types only, with the exception of "+" that continues + to work for strings as well. SQLAlchemy 2.0 may alter this further by + fetching the SET values completely rather than using evaluation. + + .. change:: + :tags: usecase, postgresql + :tickets: 8574 + + :class:`_postgresql.aggregate_order_by` now supports cache generation. + + .. change:: + :tags: bug, mysql + :tickets: 8588 + + Adjusted the regular expression used to match "CREATE VIEW" when + testing for views to work more flexibly, no longer requiring the + special keyword "ALGORITHM" in the middle, which was intended to be + optional but was not working correctly. The change allows view reflection + to work more completely on MySQL-compatible variants such as StarRocks. + Pull request courtesy John Bodley. + + .. change:: + :tags: bug, engine + :tickets: 8536 + + Fixed issue where mixing "*" with additional explicitly-named column + expressions within the columns clause of a :func:`_sql.select` construct + would cause result-column targeting to sometimes consider the label name or + other non-repeated names to be an ambiguous target. .. changelog:: :version: 1.4.41 diff --git a/doc/build/changelog/unreleased_14/7094.rst b/doc/build/changelog/unreleased_14/7094.rst deleted file mode 100644 index b6fb30d9989..00000000000 --- a/doc/build/changelog/unreleased_14/7094.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. 
change:: - :tags: bug, orm - :tickets: 7094 - - A warning is emitted in ORM configurations when an explicit - :func:`_orm.remote` annotation is applied to columns that are local to the - immediate mapped class, when the referenced class does not include any of - the same table columns. Ideally this would raise an error at some point as - it's not correct from a mapping point of view. diff --git a/doc/build/changelog/unreleased_14/7545.rst b/doc/build/changelog/unreleased_14/7545.rst deleted file mode 100644 index ea31d1a3c3f..00000000000 --- a/doc/build/changelog/unreleased_14/7545.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 7545 - - A warning is emitted when attempting to configure a mapped class within an - inheritance hierarchy where the mapper is not given any polymorphic - identity, however there is a polymorphic discriminator column assigned. - Such classes should be abstract if they never intend to load directly. - diff --git a/doc/build/changelog/unreleased_14/8507.rst b/doc/build/changelog/unreleased_14/8507.rst deleted file mode 100644 index 07944da75da..00000000000 --- a/doc/build/changelog/unreleased_14/8507.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. change:: - :tags: bug, orm, regression - :tickets: 8507 - - Fixed regression where using ORM update() with synchronize_session='fetch' - would fail due to the use of evaluators that are now used to determine the - in-Python value for expressions in the the SET clause when refreshing - objects; if the evaluators make use of math operators against non-numeric - values such as PostgreSQL JSONB, the non-evaluable condition would fail to - be detected correctly. The evaluator now limits the use of math mutation - operators to numeric types only, with the exception of "+" that continues - to work for strings as well. SQLAlchemy 2.0 may alter this further by - fetching the SET values completely rather than using evaluation. 
diff --git a/doc/build/changelog/unreleased_14/8516.rst b/doc/build/changelog/unreleased_14/8516.rst deleted file mode 100644 index 2f83586e2a8..00000000000 --- a/doc/build/changelog/unreleased_14/8516.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, asyncio - :tickets: 8516 - - Improved implementation of ``asyncio.shield()`` used in context managers as - added in :ticket:`8145`, such that the "close" operation is enclosed within - an ``asyncio.Task`` which is then strongly referenced as the operation - proceeds. This is per Python documentation indicating that the task is - otherwise not strongly referenced. diff --git a/doc/build/changelog/unreleased_14/8525.rst b/doc/build/changelog/unreleased_14/8525.rst deleted file mode 100644 index 8508e396b47..00000000000 --- a/doc/build/changelog/unreleased_14/8525.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. change:: - :tags: bug, mssql, regression - :tickets: 8525 - - Fixed yet another regression in SQL Server isolation level fetch (see - :ticket:`8231`, :ticket:`8475`), this time with "Microsoft Dynamics CRM - Database via Azure Active Directory", which apparently lacks the - ``system_views`` view entirely. Error catching has been extended that under - no circumstances will this method ever fail, provided database connectivity - is present. diff --git a/doc/build/changelog/unreleased_14/8536.rst b/doc/build/changelog/unreleased_14/8536.rst deleted file mode 100644 index d7b5283cdea..00000000000 --- a/doc/build/changelog/unreleased_14/8536.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, engine - :tickets: 8536 - - Fixed issue where mixing "*" with additional explicitly-named column - expressions within the columns clause of a :func:`_sql.select` construct - would cause result-column targeting to sometimes consider the label name or - other non-repeated names to be an ambiguous target. 
diff --git a/doc/build/changelog/unreleased_14/8569.rst b/doc/build/changelog/unreleased_14/8569.rst deleted file mode 100644 index 5ae6fce091c..00000000000 --- a/doc/build/changelog/unreleased_14/8569.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. change:: - :tags: orm, bug, regression - :tickets: 8569 - - Fixed regression for 1.4 in :func:`_orm.contains_eager` where the "wrap in - subquery" logic of :func:`_orm.joinedload` would be inadvertently triggered - for use of the :func:`_orm.contains_eager` function with similar statements - (e.g. those that use ``distinct()``, ``limit()`` or ``offset()``), which - would then lead to secondary issues with queries that used some - combinations of SQL label names and aliasing. This "wrapping" is not - appropriate for :func:`_orm.contains_eager` which has always had the - contract that the user-defined SQL statement is unmodified with the - exception of adding the appropriate columns to be fetched. diff --git a/doc/build/changelog/unreleased_14/8574.rst b/doc/build/changelog/unreleased_14/8574.rst deleted file mode 100644 index ffc1761c301..00000000000 --- a/doc/build/changelog/unreleased_14/8574.rst +++ /dev/null @@ -1,5 +0,0 @@ -.. change:: - :tags: usecase, postgresql - :tickets: 8574 - - :class:`_postgresql.aggregate_order_by` now supports cache generation. diff --git a/doc/build/changelog/unreleased_14/8588.rst b/doc/build/changelog/unreleased_14/8588.rst deleted file mode 100644 index 474c14c4fa0..00000000000 --- a/doc/build/changelog/unreleased_14/8588.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. change:: - :tags: bug, mysql - :tickets: 8588 - - Adjusted the regular expression used to match "CREATE VIEW" when - testing for views to work more flexibly, no longer requiring the - special keyword "ALGORITHM" in the middle, which was intended to be - optional but was not working correctly. The change allows view reflection - to work more completely on MySQL-compatible variants such as StarRocks. - Pull request courtesy John Bodley. 
diff --git a/doc/build/changelog/unreleased_14/8614.rst b/doc/build/changelog/unreleased_14/8614.rst deleted file mode 100644 index b975dbc1f96..00000000000 --- a/doc/build/changelog/unreleased_14/8614.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 8614 - - The :paramref:`_orm.Session.execute.bind_arguments` dictionary is no longer - mutated when passed to :meth:`_orm.Session.execute` and similar; instead, - it's copied to an internal dictionary for state changes. Among other - things, this fixes and issue where the "clause" passed to the - :meth:`_orm.Session.get_bind` method would be incorrectly referring to the - :class:`_sql.Select` construct used for the "fetch" synchronization - strategy, when the actual query being emitted was a :class:`_dml.Delete` or - :class:`_dml.Update`. This would interfere with recipes for "routing - sessions". diff --git a/doc/build/conf.py b/doc/build/conf.py index 3a236ffbc7f..d1144b41bbc 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -213,9 +213,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. -release = "1.4.41" +release = "1.4.42" -release_date = "September 6, 2022" +release_date = "October 16, 2022" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From 4f72c58f4c9e2cbb5f09d9efae9e91435c52a880 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 16 Oct 2022 10:25:36 -0400 Subject: [PATCH 400/632] Version 1.4.43 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index f82d623a208..3b19d78226a 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. 
changelog:: + :version: 1.4.43 + :include_notes_from: unreleased_14 + .. changelog:: :version: 1.4.42 :released: October 16, 2022 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index 772a60d75ec..2714b994620 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.42" +__version__ = "1.4.43" def __go(lcls): From 924a08d323286dff2f9def67c16b3fbcfd011e52 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 17 Oct 2022 15:09:01 -0400 Subject: [PATCH 401/632] update SEQUENCE docs ahead of default change for backport to 1.4 as well, remove references to Firebird, and also revert "associate Sequence with MetaData" step as this is not needed usually, just note that schema is not shared. encourage users to use IDENTITY instead. Change-Id: I5d25357042127c9cd1274c9de7abb44a525b0195 (cherry picked from commit 665c94cc2f0340735515c4f4477e11b556d2bcd8) --- doc/build/core/defaults.rst | 166 +++++++++++++++++++++++------------- 1 file changed, 105 insertions(+), 61 deletions(-) diff --git a/doc/build/core/defaults.rst b/doc/build/core/defaults.rst index 60e7e2bc57d..c875808ec70 100644 --- a/doc/build/core/defaults.rst +++ b/doc/build/core/defaults.rst @@ -346,9 +346,16 @@ Defining Sequences SQLAlchemy represents database sequences using the :class:`~sqlalchemy.schema.Sequence` object, which is considered to be a special case of "column default". It only has an effect on databases which have -explicit support for sequences, which currently includes PostgreSQL, Oracle, -MariaDB 10.3 or greater, and Firebird. The :class:`~sqlalchemy.schema.Sequence` -object is otherwise ignored. +explicit support for sequences, which among SQLAlchemy's included dialects +includes PostgreSQL, Oracle, MS SQL Server, and MariaDB. The +:class:`~sqlalchemy.schema.Sequence` object is otherwise ignored. + +.. 
tip:: + + In newer database engines, the :class:`.Identity` construct should likely + be preferred vs. :class:`.Sequence` for generation of integer primary key + values. See the section :ref:`identity_ddl` for background on this + construct. The :class:`~sqlalchemy.schema.Sequence` may be placed on any column as a "default" generator to be used during INSERT operations, and can also be @@ -361,47 +368,111 @@ commonly used in conjunction with a single integer primary key column:: Column( "cart_id", Integer, - Sequence("cart_id_seq", metadata=metadata_obj), + Sequence("cart_id_seq", start=1), primary_key=True, ), Column("description", String(40)), Column("createdate", DateTime()), ) -Where above, the table "cartitems" is associated with a sequence named -"cart_id_seq". When INSERT statements take place for "cartitems", and no value -is passed for the "cart_id" column, the "cart_id_seq" sequence will be used to -generate a value. Typically, the sequence function is embedded in the -INSERT statement, which is combined with RETURNING so that the newly generated -value can be returned to the Python code:: +Where above, the table ``cartitems`` is associated with a sequence named +``cart_id_seq``. Emitting :meth:`.MetaData.create_all` for the above +table will include: + +.. sourcecode:: sql + + CREATE SEQUENCE cart_id_seq START WITH 1 + + CREATE TABLE cartitems ( + cart_id INTEGER NOT NULL, + description VARCHAR(40), + createdate TIMESTAMP WITHOUT TIME ZONE, + PRIMARY KEY (cart_id) + ) + +.. 
tip:: + + When using tables with explicit schema names (detailed at + :ref:`schema_table_schema_name`), the configured schema of the :class:`.Table` + is **not** automatically shared by an embedded :class:`.Sequence`, instead, + specify :paramref:`.Sequence.schema`:: + + Sequence("cart_id_seq", start=1, schema="some_schema") + + The :class:`.Sequence` may also be made to automatically make use of the + :paramref:`.MetaData.schema` setting on the :class:`.MetaData` in use; + see :ref:`sequence_metadata` for background. + +When :class:`.Insert` DML constructs are invoked against the ``cartitems`` +table, without an explicit value passed for the ``cart_id`` column, the +``cart_id_seq`` sequence will be used to generate a value on participating +backends. Typically, the sequence function is embedded in the INSERT statement, +which is combined with RETURNING so that the newly generated value can be +returned to the Python process: + +.. sourcecode:: sql INSERT INTO cartitems (cart_id, description, createdate) VALUES (next_val(cart_id_seq), 'some description', '2015-10-15 12:00:15') RETURNING cart_id +When using :meth:`.Connection.execute` to invoke an :class:`.Insert` construct, +newly generated primary key identifiers, including but not limited to those +generated using :class:`.Sequence`, are available from the :class:`.CursorResult` +construct using the :attr:`.CursorResult.inserted_primary_key` attribute. + When the :class:`~sqlalchemy.schema.Sequence` is associated with a :class:`_schema.Column` as its **Python-side** default generator, the :class:`.Sequence` will also be subject to "CREATE SEQUENCE" and "DROP -SEQUENCE" DDL when similar DDL is emitted for the owning :class:`_schema.Table`. -This is a limited scope convenience feature that does not accommodate for -inheritance of other aspects of the :class:`_schema.MetaData`, such as the default -schema. 
Therefore, it is best practice that for a :class:`.Sequence` which -is local to a certain :class:`_schema.Column` / :class:`_schema.Table`, that it be -explicitly associated with the :class:`_schema.MetaData` using the -:paramref:`.Sequence.metadata` parameter. See the section -:ref:`sequence_metadata` for more background on this. +SEQUENCE" DDL when similar DDL is emitted for the owning :class:`_schema.Table`, +such as when using :meth:`.MetaData.create_all` to generate DDL for a series +of tables. + +The :class:`.Sequence` may also be associated with a +:class:`.MetaData` construct directly. This allows the :class:`.Sequence` +to be used in more than one :class:`.Table` at a time and also allows the +:paramref:`.MetaData.schema` parameter to be inherited. See the section +:ref:`sequence_metadata` for background. Associating a Sequence on a SERIAL column ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PostgreSQL's SERIAL datatype is an auto-incrementing type that implies the implicit creation of a PostgreSQL sequence when CREATE TABLE is emitted. -If a :class:`_schema.Column` specifies an explicit :class:`.Sequence` object -which also specifies a ``True`` value for the :paramref:`.Sequence.optional` -boolean flag, the :class:`.Sequence` will not take effect under PostgreSQL, -and the SERIAL datatype will proceed normally. Instead, the :class:`.Sequence` -will only take effect when used against other sequence-supporting -databases, currently Oracle and Firebird. +The :class:`.Sequence` construct, when indicated for a :class:`_schema.Column`, +may indicate that it should not be used in this specific case by specifying +a value of ``True`` for the :paramref:`.Sequence.optional` parameter. 
+This allows the given :class:`.Sequence` to be used for backends that have no +alternative primary key generation system but to ignore it for backends +such as PostgreSQL which will automatically generate a sequence for a particular +column:: + + table = Table( + "cartitems", + metadata_obj, + Column( + "cart_id", + Integer, + # use an explicit Sequence where available, but not on + # PostgreSQL where SERIAL will be used + Sequence("cart_id_seq", start=1, optional=True), + primary_key=True, + ), + Column("description", String(40)), + Column("createdate", DateTime()), + ) + +In the above example, ``CREATE TABLE`` for PostgreSQL will make use of the +``SERIAL`` datatype for the ``cart_id`` column, and the ``cart_id_seq`` +sequence will be ignored. However on Oracle, the ``cart_id_seq`` sequence +will be created explicitly. + +.. tip:: + + This particular interaction of SERIAL and SEQUENCE is fairly legacy, and + as in other cases, using :class:`.Identity` instead will simplify the + operation to simply use ``IDENTITY`` on all supported backends. 
+ Executing a Sequence Standalone ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -430,43 +501,26 @@ appropriate for the target backend:: Associating a Sequence with the MetaData ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -For many years, the SQLAlchemy documentation referred to the -example of associating a :class:`.Sequence` with a table as follows:: +For a :class:`.Sequence` that is to be associated with arbitrary +:class:`.Table` objects, the :class:`.Sequence` may be associated with +a particular :class:`_schema.MetaData`, using the +:paramref:`.Sequence.metadata` parameter:: - table = Table( - "cartitems", - metadata_obj, - Column("cart_id", Integer, Sequence("cart_id_seq"), primary_key=True), - Column("description", String(40)), - Column("createdate", DateTime()), - ) + seq = Sequence("my_general_seq", metadata=metadata_obj, start=1) -While the above is a prominent idiomatic pattern, it is recommended that -the :class:`.Sequence` in most cases be explicitly associated with the -:class:`_schema.MetaData`, using the :paramref:`.Sequence.metadata` parameter:: +Such a sequence can then be associated with columns in the usual way:: table = Table( "cartitems", metadata_obj, - Column( - "cart_id", - Integer, - Sequence("cart_id_seq", metadata=metadata_obj), - primary_key=True, - ), + seq, Column("description", String(40)), Column("createdate", DateTime()), ) -The :class:`.Sequence` object is a first class -schema construct that can exist independently of any table in a database, and -can also be shared among tables. Therefore SQLAlchemy does not implicitly -modify the :class:`.Sequence` when it is associated with a :class:`_schema.Column` -object as either the Python-side or server-side default generator. 
While the -CREATE SEQUENCE / DROP SEQUENCE DDL is emitted for a :class:`.Sequence` -defined as a Python side generator at the same time the table itself is subject -to CREATE or DROP, this is a convenience feature that does not imply that the -:class:`.Sequence` is fully associated with the :class:`_schema.MetaData` object. +In the above example, the :class:`.Sequence` object is treated as an +independent schema construct that can exist on its own or be shared among +tables. Explicitly associating the :class:`.Sequence` with :class:`_schema.MetaData` allows for the following behaviors: @@ -475,9 +529,6 @@ allows for the following behaviors: parameter specified to the target :class:`_schema.MetaData`, which affects the production of CREATE / DROP DDL, if any. -* The :meth:`.Sequence.create` and :meth:`.Sequence.drop` methods - automatically use the engine bound to the :class:`_schema.MetaData` - object, if any. * The :meth:`_schema.MetaData.create_all` and :meth:`_schema.MetaData.drop_all` methods will emit CREATE / DROP for this :class:`.Sequence`, @@ -485,11 +536,6 @@ allows for the following behaviors: :class:`_schema.Table` / :class:`_schema.Column` that's a member of this :class:`_schema.MetaData`. -Since the vast majority of cases that deal with :class:`.Sequence` expect -that :class:`.Sequence` to be fully "owned" by the associated :class:`_schema.Table` -and that options like default schema are propagated, setting the -:paramref:`.Sequence.metadata` parameter should be considered a best practice. - Associating a Sequence as the Server Side Default ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -655,8 +701,6 @@ eagerly fetched. * SQLite as of version 3.31 -* Firebird - When :class:`.Computed` is used with an unsupported backend, if the target dialect does not support it, a :class:`.CompileError` is raised when attempting to render the construct. 
Otherwise, if the dialect supports it but the From 51001683c5d3fec428f816381143fec2cb362e99 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 19 Oct 2022 09:23:21 -0400 Subject: [PATCH 402/632] rename tables to _table for basic relationships the names "parent" / "child" are confusing for new users in that they are used for table names as well as "back_populates='parent'", use a disambiguated name. In this change, there's now overlap between the variable named "association_table" and the table name "association_table". not sure of a better naming system. Change-Id: Ic036c8072caf6e9e5fbd1178986353c00b91f43d References: https://github.com/sqlalchemy/sqlalchemy/discussions/8675#discussioncomment-3915204 --- doc/build/orm/basic_relationships.rst | 132 +++++++++++++------------- 1 file changed, 66 insertions(+), 66 deletions(-) diff --git a/doc/build/orm/basic_relationships.rst b/doc/build/orm/basic_relationships.rst index 6ca4de39c61..dae04081eaf 100644 --- a/doc/build/orm/basic_relationships.rst +++ b/doc/build/orm/basic_relationships.rst @@ -22,30 +22,30 @@ the parent. 
:func:`_orm.relationship` is then specified on the parent, as refer a collection of items represented by the child:: class Parent(Base): - __tablename__ = "parent" + __tablename__ = "parent_table" id = Column(Integer, primary_key=True) children = relationship("Child") class Child(Base): - __tablename__ = "child" + __tablename__ = "child_table" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey("parent.id")) + parent_id = Column(Integer, ForeignKey("parent_table.id")) To establish a bidirectional relationship in one-to-many, where the "reverse" side is a many to one, specify an additional :func:`_orm.relationship` and connect the two using the :paramref:`_orm.relationship.back_populates` parameter:: class Parent(Base): - __tablename__ = "parent" + __tablename__ = "parent_table" id = Column(Integer, primary_key=True) children = relationship("Child", back_populates="parent") class Child(Base): - __tablename__ = "child" + __tablename__ = "child_table" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey("parent.id")) + parent_id = Column(Integer, ForeignKey("parent_table.id")) parent = relationship("Parent", back_populates="children") ``Child`` will get a ``parent`` attribute with many-to-one semantics. @@ -55,7 +55,7 @@ on a single :func:`_orm.relationship` instead of using :paramref:`_orm.relationship.back_populates`:: class Parent(Base): - __tablename__ = "parent" + __tablename__ = "parent_table" id = Column(Integer, primary_key=True) children = relationship("Child", backref="parent") @@ -88,14 +88,14 @@ Many to one places a foreign key in the parent table referencing the child. 
attribute will be created:: class Parent(Base): - __tablename__ = "parent" + __tablename__ = "parent_table" id = Column(Integer, primary_key=True) - child_id = Column(Integer, ForeignKey("child.id")) + child_id = Column(Integer, ForeignKey("child_table.id")) child = relationship("Child") class Child(Base): - __tablename__ = "child" + __tablename__ = "child_table" id = Column(Integer, primary_key=True) Bidirectional behavior is achieved by adding a second :func:`_orm.relationship` @@ -103,14 +103,14 @@ and applying the :paramref:`_orm.relationship.back_populates` parameter in both directions:: class Parent(Base): - __tablename__ = "parent" + __tablename__ = "parent_table" id = Column(Integer, primary_key=True) - child_id = Column(Integer, ForeignKey("child.id")) + child_id = Column(Integer, ForeignKey("child_table.id")) child = relationship("Child", back_populates="parents") class Child(Base): - __tablename__ = "child" + __tablename__ = "child_table" id = Column(Integer, primary_key=True) parents = relationship("Parent", back_populates="child") @@ -118,9 +118,9 @@ Alternatively, the :paramref:`_orm.relationship.backref` parameter may be applied to a single :func:`_orm.relationship`, such as ``Parent.child``:: class Parent(Base): - __tablename__ = "parent" + __tablename__ = "parent_table" id = Column(Integer, primary_key=True) - child_id = Column(Integer, ForeignKey("child.id")) + child_id = Column(Integer, ForeignKey("child_table.id")) child = relationship("Child", backref="parents") .. 
_relationships_one_to_one: @@ -145,7 +145,7 @@ a :ref:`many-to-one ` (``Child.parent``) relationships:: class Parent(Base): - __tablename__ = "parent" + __tablename__ = "parent_table" id = Column(Integer, primary_key=True) # one-to-many collection @@ -153,9 +153,9 @@ relationships:: class Child(Base): - __tablename__ = "child" + __tablename__ = "child_table" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey("parent.id")) + parent_id = Column(Integer, ForeignKey("parent_table.id")) # many-to-one scalar parent = relationship("Parent", back_populates="children") @@ -167,7 +167,7 @@ is converted into a scalar relationship using the ``uselist=False`` flag, renaming ``Parent.children`` to ``Parent.child`` for clarity:: class Parent(Base): - __tablename__ = "parent" + __tablename__ = "parent_table" id = Column(Integer, primary_key=True) # previously one-to-many Parent.children is now @@ -176,9 +176,9 @@ renaming ``Parent.children`` to ``Parent.child`` for clarity:: class Child(Base): - __tablename__ = "child" + __tablename__ = "child_table" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey("parent.id")) + parent_id = Column(Integer, ForeignKey("parent_table.id")) # many-to-one side remains, see tip below parent = relationship("Parent", back_populates="child") @@ -218,14 +218,14 @@ in this case the ``uselist`` parameter:: class Parent(Base): - __tablename__ = "parent" + __tablename__ = "parent_table" id = Column(Integer, primary_key=True) class Child(Base): - __tablename__ = "child" + __tablename__ = "child_table" id = Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey("parent.id")) + parent_id = Column(Integer, ForeignKey("parent_table.id")) parent = relationship("Parent", backref=backref("child", uselist=False)) .. 
_relationships_many_to_many: @@ -241,21 +241,21 @@ class, so that the :class:`_schema.ForeignKey` directives can locate the remote tables with which to link:: association_table = Table( - "association", + "association_table", Base.metadata, - Column("left_id", ForeignKey("left.id")), - Column("right_id", ForeignKey("right.id")), + Column("left_id", ForeignKey("left_table.id")), + Column("right_id", ForeignKey("right_table.id")), ) class Parent(Base): - __tablename__ = "left" + __tablename__ = "left_table" id = Column(Integer, primary_key=True) children = relationship("Child", secondary=association_table) class Child(Base): - __tablename__ = "right" + __tablename__ = "right_table" id = Column(Integer, primary_key=True) .. tip:: @@ -271,10 +271,10 @@ remote tables with which to link:: of issues on the application side:: association_table = Table( - "association", + "association_table", Base.metadata, - Column("left_id", ForeignKey("left.id"), primary_key=True), - Column("right_id", ForeignKey("right.id"), primary_key=True), + Column("left_id", ForeignKey("left_table.id"), primary_key=True), + Column("right_id", ForeignKey("right_table.id"), primary_key=True), ) For a bidirectional relationship, both sides of the relationship contain a @@ -282,15 +282,15 @@ collection. 
Specify using :paramref:`_orm.relationship.back_populates`, and for each :func:`_orm.relationship` specify the common association table:: association_table = Table( - "association", + "association_table", Base.metadata, - Column("left_id", ForeignKey("left.id"), primary_key=True), - Column("right_id", ForeignKey("right.id"), primary_key=True), + Column("left_id", ForeignKey("left_table.id"), primary_key=True), + Column("right_id", ForeignKey("right_table.id"), primary_key=True), ) class Parent(Base): - __tablename__ = "left" + __tablename__ = "left_table" id = Column(Integer, primary_key=True) children = relationship( "Child", secondary=association_table, back_populates="parents" @@ -298,7 +298,7 @@ for each :func:`_orm.relationship` specify the common association table:: class Child(Base): - __tablename__ = "right" + __tablename__ = "right_table" id = Column(Integer, primary_key=True) parents = relationship( "Parent", secondary=association_table, back_populates="children" @@ -310,21 +310,21 @@ use the same :paramref:`_orm.relationship.secondary` argument for the reverse relationship:: association_table = Table( - "association", + "association_table", Base.metadata, - Column("left_id", ForeignKey("left.id"), primary_key=True), - Column("right_id", ForeignKey("right.id"), primary_key=True), + Column("left_id", ForeignKey("left_table.id"), primary_key=True), + Column("right_id", ForeignKey("right_table.id"), primary_key=True), ) class Parent(Base): - __tablename__ = "left" + __tablename__ = "left_table" id = Column(Integer, primary_key=True) children = relationship("Child", secondary=association_table, backref="parents") class Child(Base): - __tablename__ = "right" + __tablename__ = "right_table" id = Column(Integer, primary_key=True) The :paramref:`_orm.relationship.secondary` argument of @@ -334,7 +334,7 @@ can define the ``association_table`` at a later point, as long as it's available to the callable after all module initialization is complete:: class 
Parent(Base): - __tablename__ = "left" + __tablename__ = "left_table" id = Column(Integer, primary_key=True) children = relationship( "Child", @@ -346,9 +346,9 @@ With the declarative extension in use, the traditional "string name of the table is accepted as well, matching the name of the table as stored in ``Base.metadata.tables``:: class Parent(Base): - __tablename__ = "left" + __tablename__ = "left_table" id = Column(Integer, primary_key=True) - children = relationship("Child", secondary="association", backref="parents") + children = relationship("Child", secondary="association_table", backref="parents") .. warning:: When passed as a Python-evaluable string, the :paramref:`_orm.relationship.secondary` argument is interpreted using Python's @@ -434,43 +434,43 @@ is stored along with each association between ``Parent`` and ``Child``:: class Association(Base): - __tablename__ = "association" - left_id = Column(ForeignKey("left.id"), primary_key=True) - right_id = Column(ForeignKey("right.id"), primary_key=True) + __tablename__ = "association_table" + left_id = Column(ForeignKey("left_table.id"), primary_key=True) + right_id = Column(ForeignKey("right_table.id"), primary_key=True) extra_data = Column(String(50)) child = relationship("Child") class Parent(Base): - __tablename__ = "left" + __tablename__ = "left_table" id = Column(Integer, primary_key=True) children = relationship("Association") class Child(Base): - __tablename__ = "right" + __tablename__ = "right_table" id = Column(Integer, primary_key=True) As always, the bidirectional version makes use of :paramref:`_orm.relationship.back_populates` or :paramref:`_orm.relationship.backref`:: class Association(Base): - __tablename__ = "association" - left_id = Column(ForeignKey("left.id"), primary_key=True) - right_id = Column(ForeignKey("right.id"), primary_key=True) + __tablename__ = "association_table" + left_id = Column(ForeignKey("left_table.id"), primary_key=True) + right_id = 
Column(ForeignKey("right_table.id"), primary_key=True) extra_data = Column(String(50)) child = relationship("Child", back_populates="parents") parent = relationship("Parent", back_populates="children") class Parent(Base): - __tablename__ = "left" + __tablename__ = "left_table" id = Column(Integer, primary_key=True) children = relationship("Association", back_populates="parent") class Child(Base): - __tablename__ = "right" + __tablename__ = "right_table" id = Column(Integer, primary_key=True) parents = relationship("Association", back_populates="child") @@ -511,10 +511,10 @@ associated object, and a second to a target attribute. after :meth:`.Session.commit`:: class Association(Base): - __tablename__ = "association" + __tablename__ = "association_table" - left_id = Column(ForeignKey("left.id"), primary_key=True) - right_id = Column(ForeignKey("right.id"), primary_key=True) + left_id = Column(ForeignKey("left_table.id"), primary_key=True) + right_id = Column(ForeignKey("right_table.id"), primary_key=True) extra_data = Column(String(50)) child = relationship("Child", backref="parent_associations") @@ -522,14 +522,14 @@ associated object, and a second to a target attribute. class Parent(Base): - __tablename__ = "left" + __tablename__ = "left_table" id = Column(Integer, primary_key=True) - children = relationship("Child", secondary="association") + children = relationship("Child", secondary="association_table") class Child(Base): - __tablename__ = "right" + __tablename__ = "right_table" id = Column(Integer, primary_key=True) Additionally, just as changes to one relationship aren't reflected in the @@ -741,17 +741,17 @@ declarative base and its :class:`_orm.registry`. 
We can then refer to this parameter:: keyword_author = Table( - "keyword_author", + "keyword_author_table", Base.metadata, - Column("author_id", Integer, ForeignKey("authors.id")), - Column("keyword_id", Integer, ForeignKey("keywords.id")), + Column("author_id", Integer, ForeignKey("authors_table.id")), + Column("keyword_id", Integer, ForeignKey("keywords_table.id")), ) class Author(Base): - __tablename__ = "authors" + __tablename__ = "authors_table" id = Column(Integer, primary_key=True) - keywords = relationship("Keyword", secondary="keyword_author") + keywords = relationship("Keyword", secondary="keyword_author_table") For additional detail on many-to-many relationships see the section :ref:`relationships_many_to_many`. From 4de4df4afeb00ce3d8f8fa07d0ec70e420cd1630 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 20 Oct 2022 12:28:29 -0400 Subject: [PATCH 403/632] move API docs downwards Sphinx 5.3 (compared to 5.1.1) is now putting all the autodoc names into the TOC. So we have to start being more careful to make sure API docs are well below narrative docs, because this new style is a wall of text. i dont yet see any options to turn it off, but it does seem like a good improvement, just makes doc organization a more difficult endeavor. Change-Id: I49428076fef9b96ef1544621de9a9dfca1699dab (cherry picked from commit b3e1fe7577efa799821a1e3ab6321d712fbfaab6) --- doc/build/core/expression_api.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/core/expression_api.rst b/doc/build/core/expression_api.rst index 236e0e2ee75..6df14f372cb 100644 --- a/doc/build/core/expression_api.rst +++ b/doc/build/core/expression_api.rst @@ -12,7 +12,6 @@ see :ref:`sqlexpression_toplevel`. .. toctree:: :maxdepth: 3 - foundation sqlelement operators selectable @@ -20,4 +19,5 @@ see :ref:`sqlexpression_toplevel`. 
functions compiler serializer + foundation visitors From db0a2ab4d25301b563e846753747d2dc52cdc4c3 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 21 Oct 2022 09:26:26 -0400 Subject: [PATCH 404/632] fix missing pool __init__ documentation Change-Id: Ic3e7fb3cc4995372646822e40d914b83a7fa78c8 (cherry picked from commit 2fc5cf56a4b146b94b5dd14239a791e354d7ebe4) --- doc/build/core/pooling.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/doc/build/core/pooling.rst b/doc/build/core/pooling.rst index b8800ead4af..138feace286 100644 --- a/doc/build/core/pooling.rst +++ b/doc/build/core/pooling.rst @@ -591,25 +591,32 @@ API Documentation - Available Pool Implementations .. autoclass:: sqlalchemy.pool.Pool + .. automethod:: __init__ .. automethod:: connect .. automethod:: dispose .. automethod:: recreate .. autoclass:: sqlalchemy.pool.QueuePool + .. automethod:: __init__ .. automethod:: connect .. autoclass:: SingletonThreadPool + .. automethod:: __init__ .. autoclass:: AssertionPool + .. automethod:: __init__ .. autoclass:: NullPool + .. automethod:: __init__ .. autoclass:: StaticPool + .. automethod:: __init__ + .. autoclass:: _ConnectionFairy :members: From d1b2e9353ad9e2c0db362262964403a5601fa1e2 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 21 Oct 2022 12:44:18 -0400 Subject: [PATCH 405/632] add step 6 to migration docs Users will need to attend to explicitly annotated models that don't use Mapped[], for a clean transition from 1.4 to 2.0. 
Fixes: #8692 Change-Id: I212018574e752d1109c712ea29ea277be5a13382 --- doc/build/changelog/migration_20.rst | 80 ++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/doc/build/changelog/migration_20.rst b/doc/build/changelog/migration_20.rst index 4e738e948e4..b33a5b0e676 100644 --- a/doc/build/changelog/migration_20.rst +++ b/doc/build/changelog/migration_20.rst @@ -378,6 +378,86 @@ and all ``exc.RemovedIn20Warning`` occurrences set to raise an error, The sections that follow will detail the specific changes to make for all major API modifications. +.. _migration_20_step_six: + +Migration to 2.0 Step Six - Add ``__allow_unmapped__`` to explicitly typed ORM models +-------------------------------------------------------------------------------------- + +SQLAlchemy 2.0 has new support for runtime interpretation of :pep:`484` typing annotations +on ORM models. A requirement of these annotations is that they must make use +of the :class:`_orm.Mapped` generic container. Annotations which don't use +:class:`_orm.Mapped` which link to constructs such as :func:`_orm.relationship` +will raise errors, as they suggest mis-configurations. 
+ +SQLAlchemy applications that use the :ref:`Mypy plugin ` with +explicit annotations that don't use :class:`_orm.Mapped` in their annotations +are subject to these errors, as would occur in the example below:: + + Base = declarative_base() + + + class Foo(Base): + __tablename__ = "foo" + + id: int = Column(Integer, primary_key=True) + + # will raise + bars: list["Bar"] = relationship("Bar", back_populates="foo") + + + class Bar(Base): + __tablename__ = "bar" + + id: int = Column(Integer, primary_key=True) + foo_id = Column(ForeignKey("foo.id")) + + # will raise + foo: Foo = relationship(Foo, back_populates="bars", cascade="all") + +Above, the ``Foo.bars`` and ``Bar.foo`` :func:`_orm.relationship` declarations +will raise an error at class construction time because they don't use +:class:`_orm.Mapped` (by contrast, the annotations that use +:class:`_schema.Column` are ignored by 2.0, as these are able to be +recognized as a legacy configuration style). To allow all annotations that +don't use :class:`_orm.Mapped` to pass without error, +the ``__allow_unmapped__`` attribute may be used on the class or any +subclasses, which will cause the annotations in these cases to be +ignored completely by the new Declarative system. + +The example below illustrates the application of ``__allow_unmapped__`` +to the Declarative ``Base`` class, where it will take effect for all classes +that descend from ``Base``:: + + # qualify the base with __allow_unmapped__. 
Can also be + # applied to classes directly if preferred + class Base: + __allow_unmapped__ = True + + + Base = declarative_base(cls=Base) + + # existing mapping proceeds, Declarative will ignore any annotations + # which don't include ``Mapped[]`` + class Foo(Base): + __tablename__ = "foo" + + id: int = Column(Integer, primary_key=True) + + bars: list["Bar"] = relationship("Bar", back_populates="foo") + + + class Bar(Base): + __tablename__ = "bar" + + id: int = Column(Integer, primary_key=True) + foo_id = Column(ForeignKey("foo.id")) + + foo: Foo = relationship(Foo, back_populates="bars", cascade="all") + +.. versionchanged:: 2.0.0beta3 - improved the ``__allow_unmapped__`` + attribute support to allow for 1.4-style explicit annotated relationships + that don't use :class:`_orm.Mapped` to remain usable. + 2.0 Migration - Core Connection / Transaction ============================================= From f710836488162518dcf2dc1006d90ecd77a2a178 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 23 Oct 2022 10:34:33 -0400 Subject: [PATCH 406/632] test support for has_table()->view; backport to 1.4 For 1.4 only; in 2.0 this just refines the test suite a bit. Fixed regression which occurred throughout the 1.4 series where the :meth:`.Inspector.has_table` method, which historically reported on views as well, stopped working for SQL Server. The method never worked for Oracle in this way, so for compatibility within the 1.4 series, Oracle's dialect remains returning False for ``has_table()`` against a view within the 1.4 series. The issue is not present in the 2.0 series which uses a different reflection architecture, where has_table() reports True for views on all backends including SQL Server and Oracle. Test support is added within the 1.4 series to ensure ``has_table()`` remains working per spec re: views. 
Fixes: #8700 Change-Id: I119a91ec07911edb08cf0799234827fec9ea1195 (cherry picked from commit c02f6b744d304578fe67da2e13d2c02ab71140d2) --- doc/build/changelog/unreleased_14/8700.rst | 15 +++++++ lib/sqlalchemy/dialects/mssql/base.py | 7 +-- .../testing/suite/test_reflection.py | 43 +++++++++++++++++++ 3 files changed, 60 insertions(+), 5 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8700.rst diff --git a/doc/build/changelog/unreleased_14/8700.rst b/doc/build/changelog/unreleased_14/8700.rst new file mode 100644 index 00000000000..b9369e038bd --- /dev/null +++ b/doc/build/changelog/unreleased_14/8700.rst @@ -0,0 +1,15 @@ +.. change:: + :tags: bug, mssql, reflection + :tickets: 8700 + + Fixed regression which occurred throughout the 1.4 series where the + :meth:`.Inspector.has_table` method, which historically reported on views + as well, stopped working for SQL Server. The method never worked for + Oracle in this way, so for compatibility within the 1.4 series, + Oracle's dialect remains returning False for ``has_table()`` against a + view within the 1.4 series. + + The issue is not present in the 2.0 series which uses a different + reflection architecture, where has_table() reports True for views on all + backends including SQL Server and Oracle. Test support is added within the + 1.4 series to ensure ``has_table()`` remains working per spec re: views. 
diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index 0c967b51670..738ff7ce34a 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -2959,11 +2959,8 @@ def has_table(self, connection, tablename, dbname, owner, schema): else: tables = ischema.tables - s = sql.select(tables.c.table_name).where( - sql.and_( - tables.c.table_type == "BASE TABLE", - tables.c.table_name == tablename, - ) + s = sql.select(tables.c.table_name, tables.c.table_type).where( + tables.c.table_name == tablename, ) if owner: diff --git a/lib/sqlalchemy/testing/suite/test_reflection.py b/lib/sqlalchemy/testing/suite/test_reflection.py index 459a4d8211c..ff98f18c073 100644 --- a/lib/sqlalchemy/testing/suite/test_reflection.py +++ b/lib/sqlalchemy/testing/suite/test_reflection.py @@ -53,6 +53,28 @@ def define_tables(cls, metadata): schema=config.test_schema, ) + if testing.requires.view_reflection: + cls.define_views(metadata) + + @classmethod + def define_views(cls, metadata): + query = "CREATE VIEW vv AS SELECT * FROM test_table" + + event.listen(metadata, "after_create", DDL(query)) + event.listen(metadata, "before_drop", DDL("DROP VIEW vv")) + + if testing.requires.schemas.enabled: + query = "CREATE VIEW %s.vv AS SELECT * FROM %s.test_table_s" % ( + config.test_schema, + config.test_schema, + ) + event.listen(metadata, "after_create", DDL(query)) + event.listen( + metadata, + "before_drop", + DDL("DROP VIEW %s.vv" % (config.test_schema)), + ) + def test_has_table(self): with config.db.begin() as conn: is_true(config.db.dialect.has_table(conn, "test_table")) @@ -78,6 +100,27 @@ def test_has_table_schema(self): ) ) + @testing.fails_on( + "oracle", + "per #8700 this remains at its previous behavior of not " + "working within 1.4.", + ) + @testing.requires.views + def test_has_table_view(self, connection): + insp = inspect(connection) + is_true(insp.has_table("vv")) + + @testing.fails_on( + "oracle", + "per #8700 
this remains at its previous behavior of not " + "working within 1.4", + ) + @testing.requires.views + @testing.requires.schemas + def test_has_table_view_schema(self, connection): + insp = inspect(connection) + is_true(insp.has_table("vv", config.test_schema)) + class HasIndexTest(fixtures.TablesTest): __backend__ = True From b3525904ba8abe323fcd84e1b3674bea4274a59c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 23 Oct 2022 19:24:54 -0400 Subject: [PATCH 407/632] skip ad-hoc properties within subclass_load_via_in Fixed issue where "selectin_polymorphic" loading for inheritance mappers would not function correctly if the :param:`_orm.Mapper.polymorphic_on` parameter referred to a SQL expression that was not directly mapped on the class. Fixes: #8704 Change-Id: I1b6be2650895fd18d2c804f6ba96de966d11041a (cherry picked from commit bd1777426255648215328252795dff24dfd08616) --- doc/build/changelog/unreleased_14/8704.rst | 8 ++ lib/sqlalchemy/orm/mapper.py | 13 ++- test/orm/inheritance/test_poly_loading.py | 109 +++++++++++++++++++++ 3 files changed, 128 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8704.rst diff --git a/doc/build/changelog/unreleased_14/8704.rst b/doc/build/changelog/unreleased_14/8704.rst new file mode 100644 index 00000000000..7327c95313e --- /dev/null +++ b/doc/build/changelog/unreleased_14/8704.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, orm + :tickets: 8704 + + Fixed issue where "selectin_polymorphic" loading for inheritance mappers + would not function correctly if the :param:`_orm.Mapper.polymorphic_on` + parameter referred to a SQL expression that was not directly mapped on the + class. 
diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index 15eae2dac09..97509515b8a 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -3175,6 +3175,13 @@ def _subclass_load_via_in(self, entity): enable_opt = strategy_options.Load(entity) for prop in self.attrs: + + # skip prop keys that are not instrumented on the mapped class. + # this is primarily the "_sa_polymorphic_on" property that gets + # created for an ad-hoc polymorphic_on SQL expression, issue #8704 + if prop.key not in self.class_manager: + continue + if prop.parent is self or prop in keep_props: # "enable" options, to turn on the properties that we want to # load by default (subject to options from the query) @@ -3183,7 +3190,8 @@ def _subclass_load_via_in(self, entity): enable_opt.set_generic_strategy( # convert string name to an attribute before passing - # to loader strategy + # to loader strategy. note this must be in terms + # of given entity, such as AliasedClass, etc. (getattr(entity.entity_namespace, prop.key),), dict(prop.strategy_key), ) @@ -3193,7 +3201,8 @@ def _subclass_load_via_in(self, entity): # the options from the query to override them disable_opt.set_generic_strategy( # convert string name to an attribute before passing - # to loader strategy + # to loader strategy. note this must be in terms + # of given entity, such as AliasedClass, etc. 
(getattr(entity.entity_namespace, prop.key),), {"do_nothing": True}, ) diff --git a/test/orm/inheritance/test_poly_loading.py b/test/orm/inheritance/test_poly_loading.py index 31e5e4ca906..fc5743a7330 100644 --- a/test/orm/inheritance/test_poly_loading.py +++ b/test/orm/inheritance/test_poly_loading.py @@ -8,6 +8,7 @@ from sqlalchemy import testing from sqlalchemy import union from sqlalchemy.orm import backref +from sqlalchemy.orm import column_property from sqlalchemy.orm import composite from sqlalchemy.orm import defaultload from sqlalchemy.orm import immediateload @@ -1180,3 +1181,111 @@ def test_load_composite(self, mapping_fixture, connection): B(id=2, thing2="thing2", comp2=XYThing(3, 4)), ], ) + + +class PolymorphicOnExprTest( + testing.AssertsExecutionResults, fixtures.TestBase +): + """test for #8704""" + + @testing.fixture() + def poly_fixture(self, connection, decl_base): + def fixture(create_prop, use_load): + class TypeTable(decl_base): + __tablename__ = "type" + + id = Column(Integer, primary_key=True) + name = Column(String(30)) + + class PolyBase(ComparableEntity, decl_base): + __tablename__ = "base" + + id = Column(Integer, primary_key=True) + type_id = Column(ForeignKey(TypeTable.id)) + + if create_prop == "create_prop": + polymorphic = column_property( + select(TypeTable.name) + .where(TypeTable.id == type_id) + .scalar_subquery() + ) + __mapper_args__ = { + "polymorphic_on": polymorphic, + } + elif create_prop == "dont_create_prop": + __mapper_args__ = { + "polymorphic_on": select(TypeTable.name) + .where(TypeTable.id == type_id) + .scalar_subquery() + } + elif create_prop == "arg_level_prop": + __mapper_args__ = { + "polymorphic_on": column_property( + select(TypeTable.name) + .where(TypeTable.id == type_id) + .scalar_subquery() + ) + } + + class Foo(PolyBase): + __tablename__ = "foo" + + if use_load == "use_polymorphic_load": + __mapper_args__ = { + "polymorphic_identity": "foo", + "polymorphic_load": "selectin", + } + else: + 
__mapper_args__ = { + "polymorphic_identity": "foo", + } + + id = Column(ForeignKey(PolyBase.id), primary_key=True) + foo_attr = Column(String(30)) + + decl_base.metadata.create_all(connection) + + with Session(connection) as session: + foo_type = TypeTable(name="foo") + session.add(foo_type) + session.flush() + + foo = Foo(type_id=foo_type.id, foo_attr="foo value") + session.add(foo) + + session.commit() + + return PolyBase, Foo, TypeTable + + yield fixture + + @testing.combinations( + "create_prop", + "dont_create_prop", + "arg_level_prop", + argnames="create_prop", + ) + @testing.combinations( + "use_polymorphic_load", + "use_loader_option", + "none", + argnames="use_load", + ) + def test_load_selectin( + self, poly_fixture, connection, create_prop, use_load + ): + PolyBase, Foo, TypeTable = poly_fixture(create_prop, use_load) + + sess = Session(connection) + + foo_type = sess.scalars(select(TypeTable)).one() + + stmt = select(PolyBase) + if use_load == "use_loader_option": + stmt = stmt.options(selectin_polymorphic(PolyBase, [Foo])) + obj = sess.scalars(stmt).all() + + def go(): + eq_(obj, [Foo(type_id=foo_type.id, foo_attr="foo value")]) + + self.assert_sql_count(testing.db, go, 0 if use_load != "none" else 1) From 922f11f6f26c5ba0481a12d7a650823f922be57c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 24 Oct 2022 19:24:11 -0400 Subject: [PATCH 408/632] add Oracle-specific parameter escapes for expanding params Fixed issue where bound parameter names, including those automatically derived from similarly-named database columns, which contained characters that normally require quoting with Oracle would not be escaped when using "expanding parameters" with the Oracle dialect, causing execution errors. The usual "quoting" for bound parameters used by the Oracle dialect is not used with the "expanding parameters" architecture, so escaping for a large range of characters is used instead, now using a list of characters/escapes that are specific to Oracle. 
Fixes: #8708 Change-Id: I90c24e48534e1b3a4c222b3022da58159784d91a (cherry picked from commit b1cd6e4295b07e01983deb2845f6e22a059f5b76) --- doc/build/changelog/unreleased_14/8708.rst | 14 ++++++++ lib/sqlalchemy/dialects/oracle/cx_oracle.py | 36 ++++++++++++++++++++ lib/sqlalchemy/testing/suite/test_dialect.py | 9 +++++ 3 files changed, 59 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/8708.rst diff --git a/doc/build/changelog/unreleased_14/8708.rst b/doc/build/changelog/unreleased_14/8708.rst new file mode 100644 index 00000000000..bb7424faaf4 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8708.rst @@ -0,0 +1,14 @@ +.. change:: + :tags: bug, oracle + :tickets: 8708 + + Fixed issue where bound parameter names, including those automatically + derived from similarly-named database columns, which contained characters + that normally require quoting with Oracle would not be escaped when using + "expanding parameters" with the Oracle dialect, causing execution errors. + The usual "quoting" for bound parameters used by the Oracle dialect is not + used with the "expanding parameters" architecture, so escaping for a large + range of characters is used instead, now using a list of characters/escapes + that are specific to Oracle. + + diff --git a/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/lib/sqlalchemy/dialects/oracle/cx_oracle.py index 64029a47966..20afff656d3 100644 --- a/lib/sqlalchemy/dialects/oracle/cx_oracle.py +++ b/lib/sqlalchemy/dialects/oracle/cx_oracle.py @@ -471,6 +471,15 @@ def _remove_clob(inputsizes, cursor, statement, parameters, context): from ...util import compat +_ORACLE_BIND_TRANSLATE_RE = re.compile(r"[%\(\):\[\]\.\/\?]") + +# Oracle bind names can't start with digits or underscores. +# currently we rely upon Oracle-specific quoting of bind names in most cases. +# however for expanding params, the escape chars are used. 
+# see #8708 +_ORACLE_BIND_TRANSLATE_CHARS = dict(zip("%():[]./?", "PAZCCCCCCC")) + + class _OracleInteger(sqltypes.Integer): def get_dbapi_type(self, dbapi): # see https://github.com/oracle/python-cx_Oracle/issues/ @@ -701,6 +710,10 @@ def bindparam_string(self, name, **kw): quote is True or quote is not False and self.preparer._bindparam_requires_quotes(name) + # bind param quoting for Oracle doesn't work with post_compile + # params. For those, the default bindparam_string will escape + # special chars, and the appending of a number "_1" etc. will + # take care of reserved words and not kw.get("post_compile", False) ): # interesting to note about expanding parameters - since the @@ -711,6 +724,29 @@ def bindparam_string(self, name, **kw): quoted_name = '"%s"' % name kw["escaped_from"] = name name = quoted_name + return OracleCompiler.bindparam_string(self, name, **kw) + + # TODO: we could likely do away with quoting altogether for + # Oracle parameters and use the custom escaping here + escaped_from = kw.get("escaped_from", None) + if not escaped_from: + + if _ORACLE_BIND_TRANSLATE_RE.search(name): + # not quite the translate use case as we want to + # also get a quick boolean if we even found + # unusual characters in the name + new_name = _ORACLE_BIND_TRANSLATE_RE.sub( + lambda m: _ORACLE_BIND_TRANSLATE_CHARS[m.group(0)], + name, + ) + if new_name[0].isdigit(): + new_name = "D" + new_name + kw["escaped_from"] = name + name = new_name + elif name[0].isdigit(): + new_name = "D" + name + kw["escaped_from"] = name + name = new_name return OracleCompiler.bindparam_string(self, name, **kw) diff --git a/lib/sqlalchemy/testing/suite/test_dialect.py b/lib/sqlalchemy/testing/suite/test_dialect.py index c2c17d0ddd1..54acc7ec4b9 100644 --- a/lib/sqlalchemy/testing/suite/test_dialect.py +++ b/lib/sqlalchemy/testing/suite/test_dialect.py @@ -325,6 +325,8 @@ class DifficultParametersTest(fixtures.TestBase): ("par(ens)",), ("percent%(ens)yah",), ("col:ons",), + 
("_starts_with_underscore",), + ("dot.s",), ("more :: %colons%",), ("/slashes/",), ("more/slashes",), @@ -359,3 +361,10 @@ def test_round_trip(self, name, connection, metadata): # name works as the key from cursor.description eq_(row._mapping[name], "some name") + + # use expanding IN + stmt = select(t.c[name]).where( + t.c[name].in_(["some name", "some other_name"]) + ) + + row = connection.execute(stmt).first() From dbae24ab4c5f1e02e81d7211c94e1c7d9fc3562f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 25 Oct 2022 10:22:14 -0400 Subject: [PATCH 409/632] raise for non-Load opt passed to options() Fixed the exception that's raised when the :func:`_orm.with_loader_criteria` option is attempted to be used within a specific loader path, like in loader.options(). :func:`_orm.with_loader_criteria` is only intended to be used at the top level. Fixes: #8711 Change-Id: Iaa7b13956b808761e618a6be6406e5c82df1c65c (cherry picked from commit eae9d1420bbfde4dbd835b654e80653cd5ac2155) --- doc/build/changelog/unreleased_14/8711.rst | 9 +++++++++ lib/sqlalchemy/orm/strategy_options.py | 14 +++++++++++++- test/orm/test_relationship_criteria.py | 17 +++++++++++++++++ 3 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/8711.rst diff --git a/doc/build/changelog/unreleased_14/8711.rst b/doc/build/changelog/unreleased_14/8711.rst new file mode 100644 index 00000000000..82e68bbc439 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8711.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, orm + :tickets: 8711 + + Fixed the exception that's raised when the + :func:`_orm.with_loader_criteria` option is attempted to be used within a + specific loader path, like in loader.options(). + :func:`_orm.with_loader_criteria` is only intended to be used at the top + level. 
diff --git a/lib/sqlalchemy/orm/strategy_options.py b/lib/sqlalchemy/orm/strategy_options.py index 1b5e762eb27..ce67286ee0d 100644 --- a/lib/sqlalchemy/orm/strategy_options.py +++ b/lib/sqlalchemy/orm/strategy_options.py @@ -518,7 +518,19 @@ def options(self, *opts): "for 'unbound' loader options" ) for opt in opts: - opt._apply_to_parent(self, apply_cache, bound) + try: + opt._apply_to_parent(self, apply_cache, bound) + except AttributeError as ae: + if not isinstance(opt, Load): + util.raise_( + sa_exc.ArgumentError( + "Loader option %s is not compatible with the " + "Load.options() method." % (opt,) + ), + from_=ae, + ) + else: + raise @_generative def set_relationship_strategy( diff --git a/test/orm/test_relationship_criteria.py b/test/orm/test_relationship_criteria.py index 7a347cd55b9..97650f53c76 100644 --- a/test/orm/test_relationship_criteria.py +++ b/test/orm/test_relationship_criteria.py @@ -26,6 +26,7 @@ from sqlalchemy.orm import with_loader_criteria from sqlalchemy.orm.decl_api import declared_attr from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing.assertions import expect_raises from sqlalchemy.testing.assertsql import CompiledSQL from sqlalchemy.testing.fixtures import fixture_session @@ -231,6 +232,22 @@ def test_select_mapper_mapper_criteria(self, user_address_fixture): "FROM users WHERE users.name != :name_1", ) + def test_err_given_in_pathed(self, user_address_fixture): + User, Address = user_address_fixture + + with expect_raises_message( + sa_exc.ArgumentError, + r"Loader option <.*LoaderCriteriaOption.*> is not compatible " + r"with the Load.options\(\) method.", + ): + select(User).options( + selectinload(User.addresses).options( + with_loader_criteria( + Address, Address.email_address != "foo" + ) + ) + ) + def test_criteria_post_replace(self, user_address_fixture): User, Address = user_address_fixture From 92468b1806f8e3dfd1e1b13ba2c85aa936704339 Mon Sep 17 00:00:00 2001 From: 
Mike Bayer Date: Wed, 26 Oct 2022 22:59:51 -0400 Subject: [PATCH 410/632] ensure _ORMJoin transfers parententity from left side Fixed bug involving :class:`.Select` constructs which used a combination of :meth:`.Select.select_from` with an ORM entity followed by :meth:`.Select.join` against the entity sent in :meth:`.Select.select_from`, as well as using plain :meth:`.Select.join_from`, which when combined with a columns clause that didn't explicitly include that entity would then cause "automatic WHERE criteria" features such as the IN expression required for a single-table inheritance subclass, as well as the criteria set up by the :func:`_orm.with_loader_criteria` option, to not be rendered for that entity. The correct entity is now transferred to the :class:`.Join` object that's generated internally, so that the criteria against the left side entity is correctly added. Fixes: #8721 Change-Id: I8266430063e2c72071b7262fdd5ec5079fbcba3e (cherry picked from commit 54ecc0fde9851f551c6e467b58d5bf7c4135e0ba) --- doc/build/changelog/unreleased_14/8721.rst | 17 +++ lib/sqlalchemy/orm/context.py | 1 + lib/sqlalchemy/orm/util.py | 31 ++++- test/orm/inheritance/test_single.py | 119 ++++++++++++++++++ test/orm/test_relationship_criteria.py | 139 +++++++++++++++++++++ 5 files changed, 302 insertions(+), 5 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8721.rst diff --git a/doc/build/changelog/unreleased_14/8721.rst b/doc/build/changelog/unreleased_14/8721.rst new file mode 100644 index 00000000000..e6d7f4bf4cc --- /dev/null +++ b/doc/build/changelog/unreleased_14/8721.rst @@ -0,0 +1,17 @@ +.. 
change:: + :tags: bug, orm + :tickets: 8721 + + Fixed bug involving :class:`.Select` constructs which used a combination of + :meth:`.Select.select_from` with an ORM entity followed by + :meth:`.Select.join` against the entity sent in + :meth:`.Select.select_from`, as well as using plain + :meth:`.Select.join_from`, which when combined with a columns clause that + didn't explicitly include that entity would then cause "automatic WHERE + criteria" features such as the IN expression required for a single-table + inheritance subclass, as well as the criteria set up by the + :func:`_orm.with_loader_criteria` option, to not be rendered for that + entity. The correct entity is now transferred to the :class:`.Join` object + that's generated internally, so that the criteria against the left + side entity is correctly added. + diff --git a/lib/sqlalchemy/orm/context.py b/lib/sqlalchemy/orm/context.py index 379b65ac7e9..62c553d0bfe 100644 --- a/lib/sqlalchemy/orm/context.py +++ b/lib/sqlalchemy/orm/context.py @@ -2253,6 +2253,7 @@ def _adjust_for_extra_criteria(self): for fromclause in self.from_clauses: ext_info = fromclause._annotations.get("parententity", None) + if ( ext_info and ( diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index 6f3278ed789..265f62660f8 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -1774,6 +1774,24 @@ def __init__( self._target_adapter = target_adapter + # we don't use the normal coercions logic for _ORMJoin + # (probably should), so do some gymnastics to get the entity. + # logic here is for #8721, which was a major bug in 1.4 + # for almost two years, not reported/fixed until 1.4.43 (!) 
+ if left_info.is_selectable: + parententity = left_selectable._annotations.get( + "parententity", None + ) + elif left_info.is_mapper or left_info.is_aliased_class: + parententity = left_info + else: + parententity = None + + if parententity is not None: + self._annotations = self._annotations.union( + {"parententity": parententity} + ) + augment_onclause = onclause is None and _extra_criteria expression.Join.__init__(self, left, right, onclause, isouter, full) @@ -1875,13 +1893,16 @@ def join( join(User.addresses).\ filter(Address.email_address=='foo@bar.com') - See :ref:`orm_queryguide_joins` for information on modern usage - of ORM level joins. + .. warning:: using :func:`_orm.join` directly may not work properly + with modern ORM options such as :func:`_orm.with_loader_criteria`. + It is strongly recommended to use the idiomatic join patterns + provided by methods such as :meth:`.Select.join` and + :meth:`.Select.join_from` when creating ORM joins. - .. deprecated:: 0.8 + .. seealso:: - the ``join_to_left`` parameter is deprecated, and will be removed - in a future release. The parameter has no effect. 
+ :ref:`orm_queryguide_joins` - in the :ref:`queryguide_toplevel` for + background on idiomatic ORM join patterns """ return _ORMJoin(left, right, onclause, isouter, full) diff --git a/test/orm/inheritance/test_single.py b/test/orm/inheritance/test_single.py index 6f611eb3ad6..041e635ab10 100644 --- a/test/orm/inheritance/test_single.py +++ b/test/orm/inheritance/test_single.py @@ -14,6 +14,7 @@ from sqlalchemy.orm import aliased from sqlalchemy.orm import Bundle from sqlalchemy.orm import column_property +from sqlalchemy.orm import join as orm_join from sqlalchemy.orm import joinedload from sqlalchemy.orm import relationship from sqlalchemy.orm import Session @@ -395,6 +396,124 @@ def test_select_from_aliased_w_subclass(self): "WHERE employees_1.type IN (__[POSTCOMPILE_type_1])", ) + @testing.combinations( + ( + lambda Engineer, Report: select(Report) + .select_from(Engineer) + .join(Engineer.reports), + ), + ( + lambda Engineer, Report: select(Report).select_from( + orm_join(Engineer, Report, Engineer.reports) + ), + ), + ( + lambda Engineer, Report: select(Report).join_from( + Engineer, Report, Engineer.reports + ), + ), + argnames="stmt_fn", + ) + @testing.combinations(True, False, argnames="alias_engineer") + def test_select_from_w_join_left(self, stmt_fn, alias_engineer): + """test #8721""" + + Engineer = self.classes.Engineer + Report = self.classes.Report + + if alias_engineer: + Engineer = aliased(Engineer) + stmt = testing.resolve_lambda( + stmt_fn, Engineer=Engineer, Report=Report + ) + + if alias_engineer: + self.assert_compile( + stmt, + "SELECT reports.report_id, reports.employee_id, reports.name " + "FROM employees AS employees_1 JOIN reports " + "ON employees_1.employee_id = reports.employee_id " + "WHERE employees_1.type IN (__[POSTCOMPILE_type_1])", + ) + else: + self.assert_compile( + stmt, + "SELECT reports.report_id, reports.employee_id, reports.name " + "FROM employees JOIN reports ON employees.employee_id = " + "reports.employee_id " + "WHERE 
employees.type IN (__[POSTCOMPILE_type_1])", + ) + + @testing.combinations( + ( + lambda Engineer, Report: select( + Report.report_id, Engineer.employee_id + ) + .select_from(Engineer) + .join(Engineer.reports), + ), + ( + lambda Engineer, Report: select( + Report.report_id, Engineer.employee_id + ).select_from(orm_join(Engineer, Report, Engineer.reports)), + ), + ( + lambda Engineer, Report: select( + Report.report_id, Engineer.employee_id + ).join_from(Engineer, Report, Engineer.reports), + ), + ) + def test_select_from_w_join_left_including_entity(self, stmt_fn): + """test #8721""" + + Engineer = self.classes.Engineer + Report = self.classes.Report + stmt = testing.resolve_lambda( + stmt_fn, Engineer=Engineer, Report=Report + ) + + self.assert_compile( + stmt, + "SELECT reports.report_id, employees.employee_id " + "FROM employees JOIN reports ON employees.employee_id = " + "reports.employee_id " + "WHERE employees.type IN (__[POSTCOMPILE_type_1])", + ) + + @testing.combinations( + ( + lambda Engineer, Report: select(Report).join( + Report.employee.of_type(Engineer) + ), + ), + ( + lambda Engineer, Report: select(Report).select_from( + orm_join(Report, Engineer, Report.employee.of_type(Engineer)) + ) + ), + ( + lambda Engineer, Report: select(Report).join_from( + Report, Engineer, Report.employee.of_type(Engineer) + ), + ), + ) + def test_select_from_w_join_right(self, stmt_fn): + """test #8721""" + + Engineer = self.classes.Engineer + Report = self.classes.Report + stmt = testing.resolve_lambda( + stmt_fn, Engineer=Engineer, Report=Report + ) + + self.assert_compile( + stmt, + "SELECT reports.report_id, reports.employee_id, reports.name " + "FROM reports JOIN employees ON employees.employee_id = " + "reports.employee_id AND employees.type " + "IN (__[POSTCOMPILE_type_1])", + ) + def test_from_statement_select(self): Engineer = self.classes.Engineer diff --git a/test/orm/test_relationship_criteria.py b/test/orm/test_relationship_criteria.py index 
97650f53c76..ed4ab32950a 100644 --- a/test/orm/test_relationship_criteria.py +++ b/test/orm/test_relationship_criteria.py @@ -16,6 +16,7 @@ from sqlalchemy import testing from sqlalchemy.orm import aliased from sqlalchemy.orm import defer +from sqlalchemy.orm import join as orm_join from sqlalchemy.orm import joinedload from sqlalchemy.orm import lazyload from sqlalchemy.orm import registry @@ -264,6 +265,144 @@ def test_criteria_post_replace(self, user_address_fixture): "WHERE users.name != :name_1", ) + @testing.combinations( + ( + lambda User, Address: select(Address) + .select_from(User) + .join(User.addresses) + .options(with_loader_criteria(User, User.name != "name")), + ), + ( + lambda User, Address: select(Address) + .select_from(orm_join(User, Address, User.addresses)) + .options(with_loader_criteria(User, User.name != "name")), + ), + ( + lambda User, Address: select(Address) + .join_from(User, Address, User.addresses) + .options(with_loader_criteria(User, User.name != "name")), + ), + argnames="stmt_fn", + ) + @testing.combinations(True, False, argnames="alias_user") + def test_criteria_select_from_w_join_left( + self, user_address_fixture, stmt_fn, alias_user + ): + """test #8721""" + User, Address = user_address_fixture + + if alias_user: + User = aliased(User) + + stmt = testing.resolve_lambda(stmt_fn, User=User, Address=Address) + + if alias_user: + self.assert_compile( + stmt, + "SELECT addresses.id, addresses.user_id, " + "addresses.email_address FROM users AS users_1 " + "JOIN addresses ON users_1.id = addresses.user_id " + "WHERE users_1.name != :name_1", + ) + else: + self.assert_compile( + stmt, + "SELECT addresses.id, addresses.user_id, " + "addresses.email_address " + "FROM users JOIN addresses ON users.id = addresses.user_id " + "WHERE users.name != :name_1", + ) + + @testing.combinations( + ( + lambda User, Address: select(Address.id, User.id) + .select_from(User) + .join(User.addresses) + .options(with_loader_criteria(User, User.name != 
"name")), + ), + ( + lambda User, Address: select(Address.id, User.id) + .select_from(orm_join(User, Address, User.addresses)) + .options(with_loader_criteria(User, User.name != "name")), + ), + ( + lambda User, Address: select(Address.id, User.id) + .join_from(User, Address, User.addresses) + .options(with_loader_criteria(User, User.name != "name")), + ), + argnames="stmt_fn", + ) + @testing.combinations(True, False, argnames="alias_user") + def test_criteria_select_from_w_join_left_including_entity( + self, user_address_fixture, stmt_fn, alias_user + ): + """test #8721""" + User, Address = user_address_fixture + + if alias_user: + User = aliased(User) + + stmt = testing.resolve_lambda(stmt_fn, User=User, Address=Address) + + if alias_user: + self.assert_compile( + stmt, + "SELECT addresses.id, users_1.id AS id_1 " + "FROM users AS users_1 JOIN addresses " + "ON users_1.id = addresses.user_id " + "WHERE users_1.name != :name_1", + ) + else: + self.assert_compile( + stmt, + "SELECT addresses.id, users.id AS id_1 " + "FROM users JOIN addresses ON users.id = addresses.user_id " + "WHERE users.name != :name_1", + ) + + @testing.combinations( + ( + lambda User, Address: select(Address) + .select_from(User) + .join(User.addresses) + .options( + with_loader_criteria(Address, Address.email_address != "email") + ), + ), + ( + # for orm_join(), this is set up before we have the context + # available that allows with_loader_criteria to be set up + # correctly + lambda User, Address: select(Address) + .select_from(orm_join(User, Address, User.addresses)) + .options( + with_loader_criteria(Address, Address.email_address != "email") + ), + testing.fails("not implemented right now"), + ), + ( + lambda User, Address: select(Address) + .join_from(User, Address, User.addresses) + .options( + with_loader_criteria(Address, Address.email_address != "email") + ), + ), + argnames="stmt_fn", + ) + def test_criteria_select_from_w_join_right( + self, user_address_fixture, stmt_fn + ): + 
"""test #8721""" + User, Address = user_address_fixture + + stmt = testing.resolve_lambda(stmt_fn, User=User, Address=Address) + self.assert_compile( + stmt, + "SELECT addresses.id, addresses.user_id, addresses.email_address " + "FROM users JOIN addresses ON users.id = addresses.user_id " + "AND addresses.email_address != :email_address_1", + ) + @testing.combinations( "select", "joined", From d8910c442ed609892f786fe89383a7176f1d94a9 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 27 Oct 2022 09:28:02 -0400 Subject: [PATCH 411/632] apply basic escaping to anon_labels unconditionally Fixed issue which prevented the :func:`_sql.literal_column` construct from working properly within the context of a :class:`.Select` construct as well as other potential places where "anonymized labels" might be generated, if the literal expression contained characters which could interfere with format strings, such as open parenthesis, due to an implementation detail of the "anonymous label" structure. Fixes: #8724 Change-Id: I3089124fbd055a011c8a245964258503b717d941 (cherry picked from commit caa9f0ff98d44359f5162bca8e7fe7bcaa2989a7) --- doc/build/changelog/unreleased_14/8724.rst | 11 +++++++ lib/sqlalchemy/sql/elements.py | 7 ++++- test/sql/test_labels.py | 34 ++++++++++++++++++++++ 3 files changed, 51 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/8724.rst diff --git a/doc/build/changelog/unreleased_14/8724.rst b/doc/build/changelog/unreleased_14/8724.rst new file mode 100644 index 00000000000..8329697ceec --- /dev/null +++ b/doc/build/changelog/unreleased_14/8724.rst @@ -0,0 +1,11 @@ +.. 
change:: + :tags: bug, sql + :tickets: 8724 + + Fixed issue which prevented the :func:`_sql.literal_column` construct from + working properly within the context of a :class:`.Select` construct as well + as other potential places where "anonymized labels" might be generated, if + the literal expression contained characters which could interfere with + format strings, such as open parenthesis, due to an implementation detail + of the "anonymous label" structure. + diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index ace43b3a1d4..eb5bc5a0087 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -5371,8 +5371,13 @@ def safe_construct( cls, seed, body, enclosing_label=None, sanitize_key=False ): + # need to escape chars that interfere with format + # strings in any case, issue #8724 + body = re.sub(r"[%\(\) \$]+", "_", body) + if sanitize_key: - body = re.sub(r"[%\(\) \$]+", "_", body).strip("_") + # sanitize_key is then an extra step used by BindParameter + body = body.strip("_") label = "%%(%d %s)s" % (seed, body.replace("%", "%%")) if enclosing_label: diff --git a/test/sql/test_labels.py b/test/sql/test_labels.py index d385b9e8d14..a82b0372eaa 100644 --- a/test/sql/test_labels.py +++ b/test/sql/test_labels.py @@ -3,6 +3,7 @@ from sqlalchemy import cast from sqlalchemy import exc as exceptions from sqlalchemy import Integer +from sqlalchemy import literal_column from sqlalchemy import MetaData from sqlalchemy import or_ from sqlalchemy import select @@ -16,6 +17,7 @@ from sqlalchemy.sql import LABEL_STYLE_TABLENAME_PLUS_COL from sqlalchemy.sql import roles from sqlalchemy.sql import table +from sqlalchemy.sql.base import prefix_anon_map from sqlalchemy.sql.elements import _truncated_label from sqlalchemy.sql.elements import ColumnElement from sqlalchemy.sql.elements import WrapsColumnExpression @@ -1038,3 +1040,35 @@ def test_column_auto_label_use_labels(self): "SOME_COL_THING(some_table.value) " "AS 
some_table_value FROM some_table", ) + + @testing.combinations( + # the resulting strings are completely arbitrary and are not + # exposed in SQL with current implementations. we want to + # only assert that the operation doesn't fail. It's safe to + # change the assertion cases for this test if the label escaping + # format changes + (literal_column("'(1,2]'"), "'_1,2]'_1"), + (literal_column("))"), "__1"), + (literal_column("'%('"), "'_'_1"), + ) + def test_labels_w_strformat_chars_in_isolation(self, test_case, expected): + """test #8724""" + + pa = prefix_anon_map() + eq_(test_case._anon_key_label % pa, expected) + + @testing.combinations( + ( + select(literal_column("'(1,2]'"), literal_column("'(1,2]'")), + "SELECT '(1,2]', '(1,2]'", + ), + (select(literal_column("))"), literal_column("))")), "SELECT )), ))"), + ( + select(literal_column("'%('"), literal_column("'%('")), + "SELECT '%(', '%('", + ), + ) + def test_labels_w_strformat_chars_in_statements(self, test_case, expected): + """test #8724""" + + self.assert_compile(test_case, expected) From 3ed60fc770dbafbc3f3193d473ab825d4bda243e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 28 Oct 2022 01:49:03 -0400 Subject: [PATCH 412/632] mssql doc updates clarify some URL things Change-Id: Ic162834052f06fd3a6c010ce5d091903fdc65cd8 (cherry picked from commit c262184ae5bac969b18eff8e10ba6d94c229499d) --- lib/sqlalchemy/dialects/mssql/pyodbc.py | 33 +++++++++++++------------ 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/lib/sqlalchemy/dialects/mssql/pyodbc.py b/lib/sqlalchemy/dialects/mssql/pyodbc.py index edb76f26525..053b7ac5482 100644 --- a/lib/sqlalchemy/dialects/mssql/pyodbc.py +++ b/lib/sqlalchemy/dialects/mssql/pyodbc.py @@ -49,18 +49,18 @@ engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=ODBC+Driver+17+for+SQL+Server") -Other keywords interpreted by the Pyodbc dialect to be passed to -``pyodbc.connect()`` in both the DSN and hostname cases include: 
-``odbc_autotranslate``, ``ansi``, ``unicode_results``, ``autocommit``, -``authentication``. -Note that in order for the dialect to recognize these keywords -(including the ``driver`` keyword above) they must be all lowercase. -Multiple additional keyword arguments must be separated by an -ampersand (``&``), not a semicolon:: - - engine = create_engine( - "mssql+pyodbc://scott:tiger@myhost:49242/databasename" - "?driver=ODBC+Driver+17+for+SQL+Server" +The ``driver`` keyword is significant to the pyodbc dialect and must be +specified in lowercase. + +Any other names passed in the query string are passed through in the pyodbc +connect string, such as ``authentication``, ``TrustServerCertificate``, etc. +Multiple keyword arguments must be separated by an ampersand (``&``); these +will be translated to semicolons when the pyodbc connect string is generated +internally:: + + e = create_engine( + "mssql+pyodbc://scott:tiger@mssql2017:1433/test?" + "driver=ODBC+Driver+18+for+SQL+Server&TrustServerCertificate=yes" "&authentication=ActiveDirectoryIntegrated" ) @@ -71,11 +71,12 @@ "mssql+pyodbc", username="scott", password="tiger", - host="myhost", - port=49242, - database="databasename", + host="mssql2017", + port=1433, + database="test", query={ - "driver": "ODBC Driver 17 for SQL Server", + "driver": "ODBC Driver 18 for SQL Server", + "TrustServerCertificate": "yes", "authentication": "ActiveDirectoryIntegrated", }, ) From 6d77db96f72b72cf4797ad54eed2e3dc889aecf4 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 28 Oct 2022 01:49:48 -0400 Subject: [PATCH 413/632] open some compound tests for mysql 8.0.31 Not sure of exact version but as we have done a major rebuild of CI, newer mysql 8.0 is passing on these. 
Change-Id: Ibcfe0ce519ab6a2941ca514b4254944769b60df4 (cherry picked from commit 50d3b85c693a4ca673bcabd711f130ae58111f16) --- test/requirements.py | 30 ++++++++++++++++++++++++++++-- test/sql/test_query.py | 6 +++--- 2 files changed, 31 insertions(+), 5 deletions(-) diff --git a/test/requirements.py b/test/requirements.py index ca074c79b26..feb5e2a5225 100644 --- a/test/requirements.py +++ b/test/requirements.py @@ -731,7 +731,7 @@ def intersect(self): """Target database must support INTERSECT or equivalent.""" return fails_if( - ["firebird", self._mysql_not_mariadb_103, "sybase"], + ["firebird", self._mysql_not_mariadb_103_not_mysql8031, "sybase"], "no support for INTERSECT", ) @@ -739,7 +739,7 @@ def intersect(self): def except_(self): """Target database must support EXCEPT or equivalent (i.e. MINUS).""" return fails_if( - ["firebird", self._mysql_not_mariadb_103, "sybase"], + ["firebird", self._mysql_not_mariadb_103_not_mysql8031, "sybase"], "no support for EXCEPT", ) @@ -1732,12 +1732,38 @@ def _mysql_not_mariadb_103(self, config): or config.db.dialect._mariadb_normalized_version_info < (10, 3) ) + def _mysql_not_mariadb_103_not_mysql8031(self, config): + return (against(config, ["mysql", "mariadb"])) and ( + ( + config.db.dialect._is_mariadb + and config.db.dialect._mariadb_normalized_version_info + < (10, 3) + ) + or ( + not config.db.dialect._is_mariadb + and config.db.dialect.server_version_info < (8, 0, 31) + ) + ) + def _mysql_not_mariadb_104(self, config): return (against(config, ["mysql", "mariadb"])) and ( not config.db.dialect._is_mariadb or config.db.dialect._mariadb_normalized_version_info < (10, 4) ) + def _mysql_not_mariadb_104_not_mysql8031(self, config): + return (against(config, ["mysql", "mariadb"])) and ( + ( + config.db.dialect._is_mariadb + and config.db.dialect._mariadb_normalized_version_info + < (10, 4) + ) + or ( + not config.db.dialect._is_mariadb + and config.db.dialect.server_version_info < (8, 0, 31) + ) + ) + def 
_has_mysql_on_windows(self, config): with config.db.connect() as conn: return ( diff --git a/test/sql/test_query.py b/test/sql/test_query.py index 0d817011329..2c0cf152762 100644 --- a/test/sql/test_query.py +++ b/test/sql/test_query.py @@ -1241,7 +1241,7 @@ def test_union_ordered_alias(self, connection): "has trouble extracting anonymous column from union subquery", ) @testing.fails_on( - testing.requires._mysql_not_mariadb_104, "FIXME: unknown" + testing.requires._mysql_not_mariadb_104_not_mysql8031, "FIXME: unknown" ) @testing.fails_on("sqlite", "FIXME: unknown") def test_union_all(self, connection): @@ -1362,7 +1362,7 @@ def test_except_style2(self, connection): eq_(found2, wanted) @testing.fails_on( - ["sqlite", testing.requires._mysql_not_mariadb_104], + ["sqlite", testing.requires._mysql_not_mariadb_104_not_mysql8031], "Can't handle this style of nesting", ) @testing.requires.except_ @@ -1400,7 +1400,7 @@ def test_except_style4(self, connection): @testing.requires.intersect @testing.fails_on( - ["sqlite", testing.requires._mysql_not_mariadb_104], + ["sqlite", testing.requires._mysql_not_mariadb_104_not_mysql8031], "sqlite can't handle leading parenthesis", ) def test_intersect_unions(self, connection): From 2ea71e5df9b662cd08b4c1581441449643fbe431 Mon Sep 17 00:00:00 2001 From: Mike Barry Date: Wed, 26 Oct 2022 13:40:32 -0400 Subject: [PATCH 414/632] use only object_id() function for temp tables Fixed issue with :meth:`.Inspector.has_table` when used against a temporary table for the SQL Server dialect would fail an invalid object name error on some Azure variants, due to an unnecessary information schema query that is not supported on those server versions. Pull request courtesy Mike Barry. the patch also fills out test support for has_table() against temp tables, temp views, adding to the has_table() support just added for views in #8700. 
Fixes: #8714 Closes: #8716 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/8716 Pull-request-sha: e2ac7a52e2b09a349a703ba1e1a2911f4d3c0912 Change-Id: Ia73e4e9e977a2d6b7e100abd2f81a8c8777dc9bb (cherry picked from commit 2af33d79eddc696c0fb1ef749999fa5d0d33f214) --- doc/build/changelog/unreleased_14/8700.rst | 15 ++-- doc/build/changelog/unreleased_14/8714.rst | 8 ++ lib/sqlalchemy/dialects/mssql/base.py | 27 ++----- lib/sqlalchemy/testing/requirements.py | 5 ++ .../testing/suite/test_reflection.py | 77 +++++++++++++++---- test/requirements.py | 12 +++ 6 files changed, 102 insertions(+), 42 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8714.rst diff --git a/doc/build/changelog/unreleased_14/8700.rst b/doc/build/changelog/unreleased_14/8700.rst index b9369e038bd..205f251ef40 100644 --- a/doc/build/changelog/unreleased_14/8700.rst +++ b/doc/build/changelog/unreleased_14/8700.rst @@ -2,14 +2,9 @@ :tags: bug, mssql, reflection :tickets: 8700 - Fixed regression which occurred throughout the 1.4 series where the - :meth:`.Inspector.has_table` method, which historically reported on views - as well, stopped working for SQL Server. The method never worked for - Oracle in this way, so for compatibility within the 1.4 series, - Oracle's dialect remains returning False for ``has_table()`` against a - view within the 1.4 series. - + Fixed issue with :meth:`.Inspector.has_table` when used against a view for + the SQL Server dialect would erroneously return ``False``, due to a + regression in the 1.4 series which removed support for this on SQL Server. The issue is not present in the 2.0 series which uses a different - reflection architecture, where has_table() reports True for views on all - backends including SQL Server and Oracle. Test support is added within the - 1.4 series to ensure ``has_table()`` remains working per spec re: views. + reflection architecture. Test support is added to ensure ``has_table()`` + remains working per spec re: views. 
diff --git a/doc/build/changelog/unreleased_14/8714.rst b/doc/build/changelog/unreleased_14/8714.rst new file mode 100644 index 00000000000..d75f2570edc --- /dev/null +++ b/doc/build/changelog/unreleased_14/8714.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, mssql + :tickets: 8714 + + Fixed issue with :meth:`.Inspector.has_table` when used against a temporary + table for the SQL Server dialect would fail an invalid object name error on + some Azure variants, due to an unnecessary information schema query that is + not supported on those server versions. Pull request courtesy Mike Barry. diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index 738ff7ce34a..6c530b631c2 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -2933,29 +2933,18 @@ def _get_default_schema_name(self, connection): @_db_plus_owner def has_table(self, connection, tablename, dbname, owner, schema): self._ensure_has_table_connection(connection) - if tablename.startswith("#"): # temporary table - tables = ischema.mssql_temp_table_columns - s = sql.select(tables.c.table_name).where( - tables.c.table_name.like( - self._temp_table_name_like_pattern(tablename) + if tablename.startswith("#"): # temporary table + # mssql does not support temporary views + # SQL Error [4103] [S0001]: "#v": Temporary views are not allowed + return bool( + connection.scalar( + # U filters on user tables only. 
+ text("SELECT object_id(:table_name, 'U')"), + {"table_name": "tempdb.dbo.[{}]".format(tablename)}, ) ) - # #7168: fetch all (not just first match) in case some other #temp - # table with the same name happens to appear first - table_names = connection.execute(s).scalars().fetchall() - # #6910: verify it's not a temp table from another session - for table_name in table_names: - if bool( - connection.scalar( - text("SELECT object_id(:table_name)"), - {"table_name": "tempdb.dbo.[{}]".format(table_name)}, - ) - ): - return True - else: - return False else: tables = ischema.tables diff --git a/lib/sqlalchemy/testing/requirements.py b/lib/sqlalchemy/testing/requirements.py index 857d1fdef1e..7e8a030e322 100644 --- a/lib/sqlalchemy/testing/requirements.py +++ b/lib/sqlalchemy/testing/requirements.py @@ -663,6 +663,11 @@ def temp_table_names(self): """target dialect supports listing of temporary table names""" return exclusions.closed() + @property + def has_temp_table(self): + """target dialect supports checking a single temp table name""" + return exclusions.closed() + @property def temporary_tables(self): """target database supports temporary tables""" diff --git a/lib/sqlalchemy/testing/suite/test_reflection.py b/lib/sqlalchemy/testing/suite/test_reflection.py index ff98f18c073..3f234d2ea9c 100644 --- a/lib/sqlalchemy/testing/suite/test_reflection.py +++ b/lib/sqlalchemy/testing/suite/test_reflection.py @@ -33,7 +33,23 @@ metadata, users = None, None -class HasTableTest(fixtures.TablesTest): +class OneConnectionTablesTest(fixtures.TablesTest): + @classmethod + def setup_bind(cls): + # TODO: when temp tables are subject to server reset, + # this will also have to disable that server reset from + # happening + if config.requirements.independent_connections.enabled: + from sqlalchemy import pool + + return engines.testing_engine( + options=dict(poolclass=pool.StaticPool, scope="class"), + ) + else: + return config.db + + +class HasTableTest(OneConnectionTablesTest): 
__backend__ = True @classmethod @@ -55,6 +71,8 @@ def define_tables(cls, metadata): if testing.requires.view_reflection: cls.define_views(metadata) + if testing.requires.has_temp_table.enabled: + cls.define_temp_tables(metadata) @classmethod def define_views(cls, metadata): @@ -75,6 +93,37 @@ def define_views(cls, metadata): DDL("DROP VIEW %s.vv" % (config.test_schema)), ) + @classmethod + def temp_table_name(cls): + return get_temp_table_name( + config, config.db, "user_tmp_%s" % (config.ident,) + ) + + @classmethod + def define_temp_tables(cls, metadata): + kw = temp_table_keyword_args(config, config.db) + table_name = cls.temp_table_name() + user_tmp = Table( + table_name, + metadata, + Column("id", sa.INT, primary_key=True), + Column("name", sa.VARCHAR(50)), + **kw + ) + if ( + testing.requires.view_reflection.enabled + and testing.requires.temporary_views.enabled + ): + event.listen( + user_tmp, + "after_create", + DDL( + "create temporary view user_tmp_v as " + "select * from user_tmp_%s" % config.ident + ), + ) + event.listen(user_tmp, "before_drop", DDL("drop view user_tmp_v")) + def test_has_table(self): with config.db.begin() as conn: is_true(config.db.dialect.has_table(conn, "test_table")) @@ -110,6 +159,19 @@ def test_has_table_view(self, connection): insp = inspect(connection) is_true(insp.has_table("vv")) + @testing.requires.has_temp_table + def test_has_table_temp_table(self, connection): + insp = inspect(connection) + temp_table_name = self.temp_table_name() + is_true(insp.has_table(temp_table_name)) + + @testing.requires.has_temp_table + @testing.requires.view_reflection + @testing.requires.temporary_views + def test_has_table_temp_view(self, connection): + insp = inspect(connection) + is_true(insp.has_table("user_tmp_v")) + @testing.fails_on( "oracle", "per #8700 this remains at its previous behavior of not " @@ -326,22 +388,11 @@ def test_get_check_constraints(self, name): assert insp.get_check_constraints(name) -class 
ComponentReflectionTest(fixtures.TablesTest): +class ComponentReflectionTest(OneConnectionTablesTest): run_inserts = run_deletes = None __backend__ = True - @classmethod - def setup_bind(cls): - if config.requirements.independent_connections.enabled: - from sqlalchemy import pool - - return engines.testing_engine( - options=dict(poolclass=pool.StaticPool, scope="class"), - ) - else: - return config.db - @classmethod def define_tables(cls, metadata): cls.define_reflected_tables(metadata, None) diff --git a/test/requirements.py b/test/requirements.py index feb5e2a5225..b5e9711115c 100644 --- a/test/requirements.py +++ b/test/requirements.py @@ -608,6 +608,18 @@ def implicit_default_schema(self): """ return only_on(["postgresql"]) + @property + def has_temp_table(self): + """target dialect supports checking a single temp table name + + unfortunately this is not the same as temp_table_names + + """ + + return only_on(["sqlite", "oracle", "postgresql", "mssql"]) + skip_if( + self._sqlite_file_db + ) + @property def default_schema_name_switch(self): return only_on(["postgresql", "oracle"]) From 2d7582e4ea98e6d60a734e2e9944d5aeda55a439 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 28 Oct 2022 12:20:22 -0400 Subject: [PATCH 415/632] update rel/fk FAQ entry this entry still made the assumptions of behavior before ticket #3061, that accessing a non-initialized scalar attribute on a pending object would populate the attribute with None. It also used the word "initialize" when referring to a persistent object which is a misleading term, it's "loaded", even though in this example it's "loading" the value of None. Fix up the language to be more consistent with the #3061 change. 
Change-Id: I1abd8f1d2e9c44ebc9a29737ea270b338f104a3e (cherry picked from commit 654d941ce9c571de18aa09a09dc6cd90bf24734c) --- doc/build/faq/sessions.rst | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/doc/build/faq/sessions.rst b/doc/build/faq/sessions.rst index c070781981b..43f3673bbd9 100644 --- a/doc/build/faq/sessions.rst +++ b/doc/build/faq/sessions.rst @@ -400,14 +400,21 @@ an "expire" event of the :func:`_orm.relationship` in which it's involved. This that for the following sequence:: o = Session.query(SomeClass).first() - assert o.foo is None # accessing an un-set attribute sets it to None + + # assume the existing o.foo_id value is None; + # accessing o.foo will reconcile this as ``None``, but will effectively + # "load" the value of None + assert o.foo is None + + # now set foo_id to something. o.foo will not be immediately affected o.foo_id = 7 -``o.foo`` is initialized to ``None`` when we first accessed it. Setting -``o.foo_id = 7`` will have the value of "7" as pending, but no flush +``o.foo`` is loaded with its effective database value of ``None`` when it +is first accessed. 
Setting +``o.foo_id = 7`` will have the value of "7" as a pending change, but no flush has occurred - so ``o.foo`` is still ``None``:: - # attribute is already set to None, has not been + # attribute is already "loaded" as None, has not been # reconciled with o.foo_id = 7 yet assert o.foo is None @@ -415,11 +422,12 @@ For ``o.foo`` to load based on the foreign key mutation is usually achieved naturally after the commit, which both flushes the new foreign key value and expires all state:: - Session.commit() # expires all attributes + session.commit() # expires all attributes foo_7 = Session.query(Foo).get(7) - assert o.foo is foo_7 # o.foo lazyloads on access + # o.foo will lazyload again, this time getting the new object + assert o.foo is foo_7 A more minimal operation is to expire the attribute individually - this can be performed for any :term:`persistent` object using :meth:`.Session.expire`:: @@ -442,14 +450,13 @@ have meaning until the row is inserted; otherwise there is no row yet:: Session.add(new_obj) - # accessing an un-set attribute sets it to None + # returns None but this is not a "lazyload", as the object is not + # persistent in the DB yet, and the None value is not part of the + # object's state assert new_obj.foo is None Session.flush() # emits INSERT - # expire this because we already set .foo to None - Session.expire(o, ["foo"]) - assert new_obj.foo is foo_7 # now it loads .. topic:: Attribute loading for non-persistent objects From d8c4c34aafdca6e62061944566ccc26864551a00 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 29 Oct 2022 20:08:25 -0400 Subject: [PATCH 416/632] fix test for same mapper to use "isa" Fixed issue in joined eager loading where an assertion fail would occur with a particular combination of outer/inner joined eager loads in conjunction with an inherited subclass mapper as the middle target. 
Fixes: #8738 Change-Id: I4909e7518302cbb82046e0425abbbdc8eb1c0146 (cherry picked from commit 99e7afb4b2d82baff80f5d1fe1b2d1b21cbbec09) --- doc/build/changelog/unreleased_14/8738.rst | 8 ++ lib/sqlalchemy/orm/strategies.py | 10 +- test/orm/inheritance/test_relationship.py | 108 +++++++++++++++++++++ 3 files changed, 123 insertions(+), 3 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8738.rst diff --git a/doc/build/changelog/unreleased_14/8738.rst b/doc/build/changelog/unreleased_14/8738.rst new file mode 100644 index 00000000000..2372c25afe1 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8738.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, orm + :tickets: 8738 + + Fixed issue in joined eager loading where an assertion fail would occur + with a particular combination of outer/inner joined eager loads in + conjunction with an inherited subclass mapper as the middle target. + diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index a014b2f4115..2b094214117 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -2386,6 +2386,11 @@ def _splice_nested_inner_join( self, path, join_obj, clauses, onclause, extra_criteria, splicing=False ): + # recursive fn to splice a nested join into an existing one. + # splicing=False means this is the outermost call, and it + # should return a value. 
splicing= is the recursive + # form, where it can return None to indicate the end of the recursion + if splicing is False: # first call is always handed a join object # from the outside @@ -2400,7 +2405,7 @@ def _splice_nested_inner_join( splicing, ) elif not isinstance(join_obj, orm_util._ORMJoin): - if path[-2] is splicing: + if path[-2].isa(splicing): return orm_util._ORMJoin( join_obj, clauses.aliased_class, @@ -2411,7 +2416,6 @@ def _splice_nested_inner_join( _extra_criteria=extra_criteria, ) else: - # only here if splicing == True return None target_join = self._splice_nested_inner_join( @@ -2434,7 +2438,7 @@ def _splice_nested_inner_join( ) if target_join is None: # should only return None when recursively called, - # e.g. splicing==True + # e.g. splicing refers to a from obj assert ( splicing is not False ), "assertion failed attempting to produce joined eager loads" diff --git a/test/orm/inheritance/test_relationship.py b/test/orm/inheritance/test_relationship.py index 0b1967f5191..c4e20fefce1 100644 --- a/test/orm/inheritance/test_relationship.py +++ b/test/orm/inheritance/test_relationship.py @@ -3015,3 +3015,111 @@ def test_load_m2o_use_get(self): is_(obj.child2, None) is_(obj.parent, c1) + + +class JoinedLoadSpliceFromJoinedTest( + testing.AssertsCompiledSQL, fixtures.DeclarativeMappedTest +): + """test #8738""" + + __dialect__ = "default" + run_create_tables = None + + @classmethod + def setup_classes(cls): + Base = cls.DeclarativeBasic + + class Root(Base): + __tablename__ = "root" + + id = Column(Integer, primary_key=True) + root_elements = relationship("BaseModel") + + class BaseModel(Base): + __tablename__ = "base_model" + + id = Column(Integer, primary_key=True) + root_id = Column(Integer, ForeignKey("root.id"), nullable=False) + type = Column(String, nullable=False) + __mapper_args__ = {"polymorphic_on": type} + + class SubModel(BaseModel): + elements = relationship("SubModelElement") + __mapper_args__ = {"polymorphic_identity": "sub_model"} + + 
class SubModelElement(Base): + __tablename__ = "sub_model_element" + + id = Column(Integer, primary_key=True) + model_id = Column(ForeignKey("base_model.id"), nullable=False) + + def test_oj_ij(self): + Root, SubModel = self.classes("Root", "SubModel") + + s = Session() + query = s.query(Root) + query = query.options( + joinedload(Root.root_elements.of_type(SubModel)).joinedload( + SubModel.elements, innerjoin=True + ) + ) + self.assert_compile( + query, + "SELECT root.id AS root_id, base_model_1.id AS base_model_1_id, " + "base_model_1.root_id AS base_model_1_root_id, " + "base_model_1.type AS base_model_1_type, " + "sub_model_element_1.id AS sub_model_element_1_id, " + "sub_model_element_1.model_id AS sub_model_element_1_model_id " + "FROM root LEFT OUTER JOIN (base_model AS base_model_1 " + "JOIN sub_model_element AS sub_model_element_1 " + "ON base_model_1.id = sub_model_element_1.model_id) " + "ON root.id = base_model_1.root_id", + ) + + def test_ij_oj(self): + Root, SubModel = self.classes("Root", "SubModel") + + s = Session() + query = s.query(Root) + query = query.options( + joinedload( + Root.root_elements.of_type(SubModel), innerjoin=True + ).joinedload(SubModel.elements) + ) + self.assert_compile( + query, + "SELECT root.id AS root_id, base_model_1.id AS base_model_1_id, " + "base_model_1.root_id AS base_model_1_root_id, " + "base_model_1.type AS base_model_1_type, " + "sub_model_element_1.id AS sub_model_element_1_id, " + "sub_model_element_1.model_id AS sub_model_element_1_model_id " + "FROM root JOIN base_model AS base_model_1 " + "ON root.id = base_model_1.root_id " + "LEFT OUTER JOIN sub_model_element AS sub_model_element_1 " + "ON base_model_1.id = sub_model_element_1.model_id" + "", + ) + + def test_ij_ij(self): + Root, SubModel = self.classes("Root", "SubModel") + + s = Session() + query = s.query(Root) + query = query.options( + joinedload( + Root.root_elements.of_type(SubModel), innerjoin=True + ).joinedload(SubModel.elements, innerjoin=True) + 
) + self.assert_compile( + query, + "SELECT root.id AS root_id, base_model_1.id AS base_model_1_id, " + "base_model_1.root_id AS base_model_1_root_id, " + "base_model_1.type AS base_model_1_type, " + "sub_model_element_1.id AS sub_model_element_1_id, " + "sub_model_element_1.model_id AS sub_model_element_1_model_id " + "FROM root JOIN base_model AS base_model_1 " + "ON root.id = base_model_1.root_id " + "JOIN sub_model_element AS sub_model_element_1 " + "ON base_model_1.id = sub_model_element_1.model_id" + "", + ) From d593d63d81fe7db0bebaa2371366343db33ed576 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 25 Oct 2022 16:00:50 -0400 Subject: [PATCH 417/632] ensure pool.reset event always called for reset Fixed issue where the :meth:`.PoolEvents.reset` event hook would not be called when a :class:`.Connection` were closed which already closed its own transaction. Logic that bypasses the "rollback on return" behavior of the pool was also skipping the event hook being emitted, preventing custom pool reset schemes from being used within this hook. This was a regression that appeared in version 1.4. For version 1.4, the hook is still not called in the case of an asyncio connection that is being discarded due to garbage collection. Version 2.0 will feature an improved version of :meth:`.PoolEvents.reset` which also receives contextual information about the reset, so that comprehensive "custom connection reset" schemes can be devised. Existing custom reset schemes that make use of :meth:`.PoolEvents.checkin` remain usable as they typically only need to deal with connections that are to be re-used. 
Change-Id: Ie17c4f55d02beb6f570b9de6b3044baffa7d6df6 Fixes: #8717 (cherry picked from commit bb8c36c5d2622e6e7033dc59dc98da0926ba7c00) --- doc/build/changelog/unreleased_14/8717.rst | 19 +++ doc/build/conf.py | 2 +- doc/build/core/pooling.rst | 165 ++++++++++++++------- lib/sqlalchemy/dialects/mssql/base.py | 54 +++++++ lib/sqlalchemy/dialects/postgresql/base.py | 62 ++++++++ lib/sqlalchemy/engine/base.py | 2 +- lib/sqlalchemy/engine/create.py | 2 +- lib/sqlalchemy/event/legacy.py | 16 +- lib/sqlalchemy/pool/base.py | 104 +++++++------ lib/sqlalchemy/pool/events.py | 36 ++++- test/engine/test_logging.py | 20 ++- test/engine/test_pool.py | 79 +++++++++- 12 files changed, 445 insertions(+), 116 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8717.rst diff --git a/doc/build/changelog/unreleased_14/8717.rst b/doc/build/changelog/unreleased_14/8717.rst new file mode 100644 index 00000000000..4f3c5cd472f --- /dev/null +++ b/doc/build/changelog/unreleased_14/8717.rst @@ -0,0 +1,19 @@ +.. change:: + :tags: bug, engine, regression + :tickets: 8717 + + Fixed issue where the :meth:`.PoolEvents.reset` event hook would not be + called when a :class:`.Connection` were closed which already called + ``.rollback()`` on its own transaction, due to an enhancement in the 1.4 + series that ensures ``.rollback()`` is only called once in this scenario, + rather than twice. This would prevent custom pool reset schemes from being + used within this hook. This was a regression that appeared in version 1.4. + + For version 1.4, the :meth:`.PoolEvents.checkin` likely remains a better + event to use for custom "reset" implementations. Version 2.0 will feature + an improved version of :meth:`.PoolEvents.reset` which is called for + additional scenarios such as termination of asyncio connections, and is + also passed contextual information about the reset, to allow for "custom + connection reset" schemes which can respond to different reset scenarios in + different ways. 
+ diff --git a/doc/build/conf.py b/doc/build/conf.py index d1144b41bbc..86f1925303c 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -106,7 +106,7 @@ changelog_render_changeset = "https://www.sqlalchemy.org/trac/changeset/%s" -exclude_patterns = ["build", "**/unreleased*/*", "**/*_include.rst"] +exclude_patterns = ["build", "**/unreleased*/*", "**/*_include.rst", ".venv"] # zzzeeksphinx makes these conversions when it is rendering the # docstrings classes, methods, and functions within the scope of diff --git a/doc/build/core/pooling.rst b/doc/build/core/pooling.rst index 138feace286..c147b1d0b7d 100644 --- a/doc/build/core/pooling.rst +++ b/doc/build/core/pooling.rst @@ -133,43 +133,116 @@ however and in particular is not supported with asyncio DBAPI drivers. Reset On Return --------------- -The pool also includes the a "reset on return" feature which will call the -``rollback()`` method of the DBAPI connection when the connection is returned -to the pool. This is so that any existing -transaction on the connection is removed, not only ensuring that no existing -state remains on next usage, but also so that table and row locks are released -as well as that any isolated data snapshots are removed. This ``rollback()`` -occurs in most cases even when using an :class:`_engine.Engine` object, -except in the case when the :class:`_engine.Connection` can guarantee -that a ``rollback()`` has been called immediately before the connection -is returned to the pool. - -For most DBAPIs, the call to ``rollback()`` is very inexpensive and if the +The pool includes "reset on return" behavior which will call the ``rollback()`` +method of the DBAPI connection when the connection is returned to the pool. +This is so that any existing transactional state is removed from the +connection, which includes not just uncommitted data but table and row locks as +well. 
For most DBAPIs, the call to ``rollback()`` is inexpensive, and if the DBAPI has already completed a transaction, the method should be a no-op. -However, for DBAPIs that incur performance issues with ``rollback()`` even if -there's no state on the connection, this behavior can be disabled using the -``reset_on_return`` option of :class:`_pool.Pool`. The behavior is safe -to disable under the following conditions: - -* If the database does not support transactions at all, such as using - MySQL with the MyISAM engine, or the DBAPI is used in autocommit - mode only, the behavior can be disabled. -* If the pool itself doesn't maintain a connection after it's checked in, - such as when using :class:`.NullPool`, the behavior can be disabled. -* Otherwise, it must be ensured that: - - * the application ensures that all :class:`_engine.Connection` - objects are explicitly closed out using a context manager (i.e. ``with`` - block) or a ``try/finally`` style block - * connections are never allowed to be garbage collected before being explicitly - closed. - * the DBAPI connection itself, e.g. ``connection.connection``, is not used - directly, or the application ensures that ``.rollback()`` is called - on this connection before releasing it back to the connection pool. - -The "reset on return" step may be logged using the ``logging.DEBUG`` + +Disabling Reset on Return for non-transactional connections +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +For very specific cases where this ``rollback()`` is not useful, such as when +using a connection that is configured for +:ref:`autocommit ` or when using a database +that has no ACID capabilities such as the MyISAM engine of MySQL, the +reset-on-return behavior can be disabled, which is typically done for +performance reasons. 
This can be affected by using the +:paramref:`_pool.Pool.reset_on_return` parameter of :class:`_pool.Pool`, which +is also available from :func:`_sa.create_engine` as +:paramref:`_sa.create_engine.pool_reset_on_return`, passing a value of ``None``. +This is illustrated in the example below, in conjunction with the +:paramref:`.create_engine.isolation_level` parameter setting of +``AUTOCOMMIT``:: + + non_acid_engine = create_engine( + "mysql://scott:tiger@host/db", + pool_reset_on_return=None, + isolation_level="AUTOCOMMIT", + ) + +The above engine won't actually perform ROLLBACK when connections are returned +to the pool; since AUTOCOMMIT is enabled, the driver will also not perform +any BEGIN operation. + +Custom Reset-on-Return Schemes +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +"reset on return" consisting of a single ``rollback()`` may not be sufficient +for some use cases; in particular, applications which make use of temporary +tables may wish for these tables to be automatically removed on connection +checkin. Some (but notably not all) backends include features that can "reset" +such tables within the scope of a database connection, which may be a desirable +behavior for connection pool reset. Other server resources such as prepared +statement handles and server-side statement caches may persist beyond the +checkin process, which may or may not be desirable, depending on specifics. +Again, some (but again not all) backends may provide for a means of resetting +this state. The two SQLAlchemy included dialects which are known to have +such reset schemes include Microsoft SQL Server, where an undocumented but +widely known stored procedure called ``sp_reset_connection`` is often used, +and PostgreSQL, which has a well-documented series of commands including +``DISCARD`` ``RESET``, ``DEALLOCATE``, and ``UNLISTEN``. + +.. 
note: next paragraph + example should match mssql/base.py example + +The following example illustrates how to replace reset on return with the +Microsoft SQL Server ``sp_reset_connection`` stored procedure, using the +:meth:`.PoolEvents.reset` event hook (**requires SQLAlchemy 1.4.43 or greater**). +The :paramref:`_sa.create_engine.pool_reset_on_return` parameter is set to +``None`` so that the custom scheme can replace the default behavior completely. +The custom hook implementation calls ``.rollback()`` in any case, as it's +usually important that the DBAPI's own tracking of commit/rollback will remain +consistent with the state of the transaction:: + + from sqlalchemy import create_engine + from sqlalchemy import event + + mssql_engine = create_engine( + "mssql+pyodbc://scott:tiger^5HHH@mssql2017:1433/test?driver=ODBC+Driver+17+for+SQL+Server", + # disable default reset-on-return scheme + pool_reset_on_return=None, + ) + + + @event.listens_for(mssql_engine, "reset") + def _reset_mssql(dbapi_connection, connection_record, reset_state): + dbapi_connection.execute("{call sys.sp_reset_connection}") + + # so that the DBAPI itself knows that the connection has been + # reset + dbapi_connection.rollback() + +.. versionchanged:: 1.4.43 Ensured the :meth:`.PoolEvents.reset` event + is invoked for all "reset" occurrences, so that it's appropriate + as a place for custom "reset" handlers. Previous schemes which + use the :meth:`.PoolEvents.checkin` handler remain usable as well. + +.. seealso:: + * :ref:`mssql_reset_on_return` - in the :ref:`mssql_toplevel` documentation + * :ref:`postgresql_reset_on_return` in the :ref:`postgresql_toplevel` documentation + +Logging reset-on-return events +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Logging for pool events including reset on return can be set +``logging.DEBUG`` log level along with the ``sqlalchemy.pool`` logger, or by setting -``echo_pool='debug'`` with :func:`_sa.create_engine`. 
+:paramref:`_sa.create_engine.echo_pool` to ``"debug"`` when using +:func:`_sa.create_engine`:: + + >>> from sqlalchemy import create_engine + >>> engine = create_engine("postgresql://scott:tiger@localhost/test", echo_pool="debug") + +The above pool will show verbose logging including reset on return:: + + >>> c1 = engine.connect() + DEBUG sqlalchemy.pool.impl.QueuePool Created new connection + DEBUG sqlalchemy.pool.impl.QueuePool Connection checked out from pool + >>> c1.close() + DEBUG sqlalchemy.pool.impl.QueuePool Connection being returned to pool + DEBUG sqlalchemy.pool.impl.QueuePool Connection rollback-on-return + Pool Events ----------- @@ -590,32 +663,22 @@ API Documentation - Available Pool Implementations -------------------------------------------------- .. autoclass:: sqlalchemy.pool.Pool - - .. automethod:: __init__ - .. automethod:: connect - .. automethod:: dispose - .. automethod:: recreate + :members: .. autoclass:: sqlalchemy.pool.QueuePool - - .. automethod:: __init__ - .. automethod:: connect + :members: .. autoclass:: SingletonThreadPool - - .. automethod:: __init__ + :members: .. autoclass:: AssertionPool - - .. automethod:: __init__ + :members: .. autoclass:: NullPool - - .. automethod:: __init__ + :members: .. autoclass:: StaticPool - - .. automethod:: __init__ + :members: .. autoclass:: _ConnectionFairy :members: diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index 6c530b631c2..0509413062f 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -442,6 +442,60 @@ class TestTable(Base): :ref:`dbapi_autocommit` +.. 
_mssql_reset_on_return: + +Temporary Table / Resource Reset for Connection Pooling +------------------------------------------------------- + +The :class:`.QueuePool` connection pool implementation used +by the SQLAlchemy :class:`_sa.Engine` object includes +:ref:`reset on return ` behavior that will invoke +the DBAPI ``.rollback()`` method when connections are returned to the pool. +While this rollback will clear out the immediate state used by the previous +transaction, it does not cover a wider range of session-level state, including +temporary tables as well as other server state such as prepared statement +handles and statement caches. An undocumented SQL Server procedure known +as ``sp_reset_connection`` is known to be a workaround for this issue which +will reset most of the session state that builds up on a connection, including +temporary tables. + +To install ``sp_reset_connection`` as the means of performing reset-on-return, +the :meth:`.PoolEvents.reset` event hook may be used, as demonstrated in the +example below (**requires SQLAlchemy 1.4.43 or greater**). The +:paramref:`_sa.create_engine.pool_reset_on_return` parameter is set to ``None`` +so that the custom scheme can replace the default behavior completely. 
The +custom hook implementation calls ``.rollback()`` in any case, as it's usually +important that the DBAPI's own tracking of commit/rollback will remain +consistent with the state of the transaction:: + + from sqlalchemy import create_engine + from sqlalchemy import event + + mssql_engine = create_engine( + "mssql+pyodbc://scott:tiger^5HHH@mssql2017:1433/test?driver=ODBC+Driver+17+for+SQL+Server", + + # disable default reset-on-return scheme + pool_reset_on_return=None, + ) + + + @event.listens_for(mssql_engine, "reset") + def _reset_mssql(dbapi_connection, connection_record, reset_state): + dbapi_connection.execute("{call sys.sp_reset_connection}") + + # so that the DBAPI itself knows that the connection has been + # reset + dbapi_connection.rollback() + +.. versionchanged:: 1.4.43 Ensured the :meth:`.PoolEvents.reset` event + is invoked for all "reset" occurrences, so that it's appropriate + as a place for custom "reset" handlers. Previous schemes which + use the :meth:`.PoolEvents.checkin` handler remain usable as well. + +.. seealso:: + + :ref:`pool_reset_on_return` - in the :ref:`pooling_toplevel` documentation + Nullability ----------- MSSQL has support for three levels of column nullability. The default diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index eb841700d3b..6820aa60154 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -230,6 +230,68 @@ def use_identity(element, compiler, **kw): .. versionadded:: 1.4 added support for the ``postgresql_readonly`` and ``postgresql_deferrable`` execution options. +.. 
_postgresql_reset_on_return: + +Temporary Table / Resource Reset for Connection Pooling +------------------------------------------------------- + +The :class:`.QueuePool` connection pool implementation used +by the SQLAlchemy :class:`_sa.Engine` object includes +:ref:`reset on return ` behavior that will invoke +the DBAPI ``.rollback()`` method when connections are returned to the pool. +While this rollback will clear out the immediate state used by the previous +transaction, it does not cover a wider range of session-level state, including +temporary tables as well as other server state such as prepared statement +handles and statement caches. The PostgreSQL database includes a variety +of commands which may be used to reset this state, including +``DISCARD``, ``RESET``, ``DEALLOCATE``, and ``UNLISTEN``. + + +To install +one or more of these commands as the means of performing reset-on-return, +the :meth:`.PoolEvents.reset` event hook may be used, as demonstrated +in the example below (**requires SQLAlchemy 1.4.43 or greater**). The implementation +will end transactions in progress as well as discard temporary tables +using the ``CLOSE``, ``RESET`` and ``DISCARD`` commands; see the PostgreSQL +documentation for background on what each of these statements do. + +The :paramref:`_sa.create_engine.pool_reset_on_return` parameter +is set to ``None`` so that the custom scheme can replace the default behavior +completely. 
The custom hook implementation calls ``.rollback()`` in any case, +as it's usually important that the DBAPI's own tracking of commit/rollback +will remain consistent with the state of the transaction:: + + + from sqlalchemy import create_engine + from sqlalchemy import event + + postgresql_engine = create_engine( + "postgresql+pyscopg2://scott:tiger@hostname/dbname", + + # disable default reset-on-return scheme + pool_reset_on_return=None, + ) + + + @event.listens_for(postgresql_engine, "reset") + def _reset_mssql(dbapi_connection, connection_record, reset_state): + dbapi_connection.execute("CLOSE ALL") + dbapi_connection.execute("RESET ALL") + dbapi_connection.execute("DISCARD TEMP") + + # so that the DBAPI itself knows that the connection has been + # reset + dbapi_connection.rollback() + +.. versionchanged:: 1.4.43 Ensured the :meth:`.PoolEvents.reset` event + is invoked for all "reset" occurrences, so that it's appropriate + as a place for custom "reset" handlers. Previous schemes which + use the :meth:`.PoolEvents.checkin` handler remain usable as well. + +.. seealso:: + + :ref:`pool_reset_on_return` - in the :ref:`pooling_toplevel` documentation + .. _postgresql_alternate_search_path: Setting Alternate Search Paths on Connect diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index f126eb0c56e..00e1be77669 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -1241,7 +1241,7 @@ def close(self): # as we just closed the transaction, close the connection # pool connection without doing an additional reset if skip_reset: - conn._close_no_reset() + conn._close_special(transaction_reset=True) else: conn.close() diff --git a/lib/sqlalchemy/engine/create.py b/lib/sqlalchemy/engine/create.py index b9886b701b7..8c929ccc4ab 100644 --- a/lib/sqlalchemy/engine/create.py +++ b/lib/sqlalchemy/engine/create.py @@ -445,7 +445,7 @@ def create_engine(url, **kwargs): .. 
seealso:: - :paramref:`_pool.Pool.reset_on_return` + :ref:`pool_reset_on_return` :param pool_timeout=30: number of seconds to wait before giving up on getting a connection from the pool. This is only used diff --git a/lib/sqlalchemy/event/legacy.py b/lib/sqlalchemy/event/legacy.py index d9f6ce57354..686e4c5bf5d 100644 --- a/lib/sqlalchemy/event/legacy.py +++ b/lib/sqlalchemy/event/legacy.py @@ -144,9 +144,9 @@ def _legacy_listen_examples(dispatch_collection, sample_target, fn): def _version_signature_changes(parent_dispatch_cls, dispatch_collection): since, args, conv = dispatch_collection.legacy_signatures[0] return ( - "\n.. deprecated:: %(since)s\n" - " The :class:`.%(clsname)s.%(event_name)s` event now accepts the \n" - " arguments ``%(named_event_arguments)s%(has_kw_arguments)s``.\n" + "\n.. versionchanged:: %(since)s\n" + " The :meth:`.%(clsname)s.%(event_name)s` event now accepts the \n" + " arguments %(named_event_arguments)s%(has_kw_arguments)s.\n" " Support for listener functions which accept the previous \n" ' argument signature(s) listed above as "deprecated" will be \n' " removed in a future release." @@ -154,7 +154,15 @@ def _version_signature_changes(parent_dispatch_cls, dispatch_collection): "since": since, "clsname": parent_dispatch_cls.__name__, "event_name": dispatch_collection.name, - "named_event_arguments": ", ".join(dispatch_collection.arg_names), + "named_event_arguments": ", ".join( + ":paramref:`.%(clsname)s.%(event_name)s.%(param_name)s`" + % { + "clsname": parent_dispatch_cls.__name__, + "event_name": dispatch_collection.name, + "param_name": param_name, + } + for param_name in dispatch_collection.arg_names + ), "has_kw_arguments": ", **kw" if dispatch_collection.has_kw else "", } ) diff --git a/lib/sqlalchemy/pool/base.py b/lib/sqlalchemy/pool/base.py index 9f16c654334..a8234c53093 100644 --- a/lib/sqlalchemy/pool/base.py +++ b/lib/sqlalchemy/pool/base.py @@ -115,34 +115,39 @@ def __init__( logging. 
:param reset_on_return: Determine steps to take on - connections as they are returned to the pool, which were - not otherwise handled by a :class:`_engine.Connection`. - - reset_on_return can have any of these values: - - * ``"rollback"`` - call rollback() on the connection, - to release locks and transaction resources. - This is the default value. The vast majority - of use cases should leave this value set. - * ``True`` - same as 'rollback', this is here for - backwards compatibility. - * ``"commit"`` - call commit() on the connection, - to release locks and transaction resources. - A commit here may be desirable for databases that - cache query plans if a commit is emitted, - such as Microsoft SQL Server. However, this - value is more dangerous than 'rollback' because - any data changes present on the transaction - are committed unconditionally. - * ``None`` - don't do anything on the connection. - This setting is only appropriate if the database / DBAPI - works in pure "autocommit" mode at all times, or if the - application uses the :class:`_engine.Engine` with consistent - connectivity patterns. See the section - :ref:`pool_reset_on_return` for more details. - - * ``False`` - same as None, this is here for - backwards compatibility. + connections as they are returned to the pool, which were + not otherwise handled by a :class:`_engine.Connection`. + Available from :func:`_sa.create_engine` via the + :paramref:`_sa.create_engine.pool_reset_on_return` parameter. + + :paramref:`_pool.Pool.reset_on_return` can have any of these values: + + * ``"rollback"`` - call rollback() on the connection, + to release locks and transaction resources. + This is the default value. The vast majority + of use cases should leave this value set. + * ``"commit"`` - call commit() on the connection, + to release locks and transaction resources. + A commit here may be desirable for databases that + cache query plans if a commit is emitted, + such as Microsoft SQL Server. 
However, this + value is more dangerous than 'rollback' because + any data changes present on the transaction + are committed unconditionally. + * ``None`` - don't do anything on the connection. + This setting may be appropriate if the database / DBAPI + works in pure "autocommit" mode at all times, or if + a custom reset handler is established using the + :meth:`.PoolEvents.reset` event handler. + * ``True`` - same as 'rollback', this is here for + backwards compatibility. + * ``False`` - same as None, this is here for + backwards compatibility. + + For further customization of reset on return, the + :meth:`.PoolEvents.reset` event hook may be used which can perform + any connection activity desired on reset. (requires version 1.4.43 + or greater) .. seealso:: @@ -495,7 +500,9 @@ def checkout(cls, pool): rec.fairy_ref = ref = weakref.ref( fairy, lambda ref: _finalize_fairy - and _finalize_fairy(None, rec, pool, ref, echo, True), + and _finalize_fairy( + None, rec, pool, ref, echo, transaction_was_reset=False + ), ) _strong_ref_connection_records[ref] = rec if echo: @@ -697,7 +704,7 @@ def _finalize_fairy( pool, ref, # this is None when called directly, not by the gc echo, - reset=True, + transaction_was_reset=False, fairy=None, ): """Cleanup for a :class:`._ConnectionFairy` whether or not it's already @@ -735,11 +742,8 @@ def _finalize_fairy( if dbapi_connection is not None: if connection_record and echo: pool.logger.debug( - "Connection %r being returned to pool%s", + "Connection %r being returned to pool", dbapi_connection, - ", transaction state was already reset by caller" - if not reset - else "", ) try: @@ -749,8 +753,8 @@ def _finalize_fairy( echo, ) assert fairy.dbapi_connection is dbapi_connection - if reset and can_manipulate_connection: - fairy._reset(pool) + if can_manipulate_connection: + fairy._reset(pool, transaction_was_reset) if detach: if connection_record: @@ -978,14 +982,14 @@ def _checkout(cls, pool, threadconns=None, fairy=None): def 
_checkout_existing(self): return _ConnectionFairy._checkout(self._pool, fairy=self) - def _checkin(self, reset=True): + def _checkin(self, transaction_was_reset=False): _finalize_fairy( self.dbapi_connection, self._connection_record, self._pool, None, self._echo, - reset=reset, + transaction_was_reset=transaction_was_reset, fairy=self, ) self.dbapi_connection = None @@ -993,15 +997,23 @@ def _checkin(self, reset=True): _close = _checkin - def _reset(self, pool): + def _reset(self, pool, transaction_was_reset=False): if pool.dispatch.reset: pool.dispatch.reset(self, self._connection_record) if pool._reset_on_return is reset_rollback: - if self._echo: - pool.logger.debug( - "Connection %s rollback-on-return", self.dbapi_connection - ) - pool._dialect.do_rollback(self) + if transaction_was_reset: + if self._echo: + pool.logger.debug( + "Connection %s reset, transaction already reset", + self.dbapi_connection, + ) + else: + if self._echo: + pool.logger.debug( + "Connection %s rollback-on-return", + self.dbapi_connection, + ) + pool._dialect.do_rollback(self) elif pool._reset_on_return is reset_commit: if self._echo: pool.logger.debug( @@ -1131,7 +1143,7 @@ def close(self): if self._counter == 0: self._checkin() - def _close_no_reset(self): + def _close_special(self, transaction_reset=False): self._counter -= 1 if self._counter == 0: - self._checkin(reset=False) + self._checkin(transaction_was_reset=transaction_reset) diff --git a/lib/sqlalchemy/pool/events.py b/lib/sqlalchemy/pool/events.py index 2829a58ae30..f0f97832bf1 100644 --- a/lib/sqlalchemy/pool/events.py +++ b/lib/sqlalchemy/pool/events.py @@ -151,17 +151,37 @@ def checkin(self, dbapi_connection, connection_record): def reset(self, dbapi_connection, connection_record): """Called before the "reset" action occurs for a pooled connection. - This event represents - when the ``rollback()`` method is called on the DBAPI connection - before it is returned to the pool. 
The behavior of "reset" can - be controlled, including disabled, using the ``reset_on_return`` - pool argument. - - + This event represents when the ``rollback()`` method is called on the + DBAPI connection before it is returned to the pool or discarded. A + custom "reset" strategy may be implemented using this event hook, which + may also be combined with disabling the default "reset" behavior using + the :paramref:`_pool.Pool.reset_on_return` parameter. + + The primary difference between the :meth:`_events.PoolEvents.reset` and + :meth:`_events.PoolEvents.checkin` events are that + :meth:`_events.PoolEvents.reset` is called not just for pooled + connections that are being returned to the pool, but also for + connections that were detached using the + :meth:`_engine.Connection.detach` method. + + Note that the event **is not** invoked for connections that were + invalidated using :meth:`_engine.Connection.invalidate`. These + events may be intercepted using the :meth:`.PoolEvents.soft_invalidate` + and :meth:`.PoolEvents.invalidate` event hooks, and all "connection + close" events may be intercepted using :meth:`.PoolEvents.close`. The :meth:`_events.PoolEvents.reset` event is usually followed by the - :meth:`_events.PoolEvents.checkin` event is called, except in those + :meth:`_events.PoolEvents.checkin` event, except in those cases where the connection is discarded immediately after reset. + In the 1.4 series, the event is also not invoked for asyncio + connections that are being garbage collected without their being + explicitly returned to the pool. This is due to the lack of an event + loop which prevents "reset" operations from taking place. Version 2.0 + will feature an enhanced version of :meth:`.PoolEvents.reset` which is + invoked in this scenario while passing additional contextual + information indicating that an event loop is not guaranteed + to be present. + :param dbapi_connection: a DBAPI connection. 
The :attr:`._ConnectionRecord.dbapi_connection` attribute. diff --git a/test/engine/test_logging.py b/test/engine/test_logging.py index 5b0d6c762e2..ded91490396 100644 --- a/test/engine/test_logging.py +++ b/test/engine/test_logging.py @@ -449,6 +449,14 @@ def _test_queuepool(self, q, dispose=True): conn.close() conn = None + conn = q.connect() + conn._close_special(transaction_reset=True) + conn = None + + conn = q.connect() + conn._close_special(transaction_reset=False) + conn = None + conn = q.connect() conn = None del conn @@ -460,13 +468,19 @@ def _test_queuepool(self, q, dispose=True): [ "Created new connection %r", "Connection %r checked out from pool", - "Connection %r being returned to pool%s", + "Connection %r being returned to pool", + "Connection %s rollback-on-return", + "Connection %r checked out from pool", + "Connection %r being returned to pool", "Connection %s rollback-on-return", "Connection %r checked out from pool", - "Connection %r being returned to pool%s", + "Connection %r being returned to pool", + "Connection %s reset, transaction already reset", + "Connection %r checked out from pool", + "Connection %r being returned to pool", "Connection %s rollback-on-return", "Connection %r checked out from pool", - "Connection %r being returned to pool%s", + "Connection %r being returned to pool", "Connection %s rollback-on-return", "%s connection %r", ] diff --git a/test/engine/test_pool.py b/test/engine/test_pool.py index 879369a9ffd..7a3b8ed58dc 100644 --- a/test/engine/test_pool.py +++ b/test/engine/test_pool.py @@ -5,6 +5,7 @@ import weakref import sqlalchemy as tsa +from sqlalchemy import create_engine from sqlalchemy import event from sqlalchemy import pool from sqlalchemy import select @@ -1922,14 +1923,90 @@ def _fixture(self, **kw): pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw), ) - def test_plain_rollback(self): + def _engine_fixture(self, **kw): + dbapi = Mock() + + return dbapi, create_engine( + "postgresql://", + 
module=dbapi, + creator=lambda: dbapi.connect("foo.db"), + _initialize=False, + ) + + def test_custom(self): + dbapi, p = self._fixture(reset_on_return=None) + + @event.listens_for(p, "reset") + def custom_reset(dbapi_conn, record): + dbapi_conn.special_reset_method() + + c1 = p.connect() + c1.close() + + assert dbapi.connect().special_reset_method.called + assert not dbapi.connect().rollback.called + assert not dbapi.connect().commit.called + + @testing.combinations(True, False, argnames="assert_w_event") + @testing.combinations(True, False, argnames="use_engine_transaction") + def test_custom_via_engine(self, assert_w_event, use_engine_transaction): + dbapi, engine = self._engine_fixture(reset_on_return=None) + + if assert_w_event: + + @event.listens_for(engine, "reset") + def custom_reset(dbapi_conn, record): + dbapi_conn.special_reset_method() + + c1 = engine.connect() + if use_engine_transaction: + c1.begin() + c1.close() + assert dbapi.connect().rollback.called + + if assert_w_event: + assert dbapi.connect().special_reset_method.called + + @testing.combinations(True, False, argnames="assert_w_event") + def test_plain_rollback(self, assert_w_event): dbapi, p = self._fixture(reset_on_return="rollback") + if assert_w_event: + + @event.listens_for(p, "reset") + def custom_reset(dbapi_conn, record): + dbapi_conn.special_reset_method() + c1 = p.connect() c1.close() assert dbapi.connect().rollback.called assert not dbapi.connect().commit.called + if assert_w_event: + assert dbapi.connect().special_reset_method.called + + @testing.combinations(True, False, argnames="assert_w_event") + @testing.combinations(True, False, argnames="use_engine_transaction") + def test_plain_rollback_via_engine( + self, assert_w_event, use_engine_transaction + ): + dbapi, engine = self._engine_fixture(reset_on_return="rollback") + + if assert_w_event: + + @event.listens_for(engine, "reset") + def custom_reset(dbapi_conn, record): + dbapi_conn.special_reset_method() + + c1 = 
engine.connect() + if use_engine_transaction: + c1.begin() + c1.close() + assert dbapi.connect().rollback.called + + if assert_w_event: + assert dbapi.connect().special_reset_method.called + def test_plain_commit(self): dbapi, p = self._fixture(reset_on_return="commit") From 766d329f83cd7a2f65114cf5a628087bbfef151c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 31 Oct 2022 15:09:34 -0400 Subject: [PATCH 418/632] use simple decimal query to detect decimal char Fixed issue where the ``nls_session_parameters`` view queried on first connect in order to get the default decimal point character may not be available depending on Oracle connection modes, and would therefore raise an error. The approach to detecting decimal char has been simplified to test a decimal value directly, instead of reading system views, which works on any backend / driver. Fixes: #8744 Change-Id: I39825131c13513798863197d0c180dd5a18b32dc (cherry picked from commit 12b334417bf67c1ed302d30787e4c2dfae7ee335) --- doc/build/changelog/unreleased_14/8744.rst | 11 ++ lib/sqlalchemy/dialects/oracle/cx_oracle.py | 31 +++++- test/dialect/oracle/test_types.py | 109 ++++++++++---------- 3 files changed, 95 insertions(+), 56 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8744.rst diff --git a/doc/build/changelog/unreleased_14/8744.rst b/doc/build/changelog/unreleased_14/8744.rst new file mode 100644 index 00000000000..6d24f0ff930 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8744.rst @@ -0,0 +1,11 @@ +.. change:: + :tags: bug, oracle + :tickets: 8744 + + Fixed issue where the ``nls_session_parameters`` view queried on first + connect in order to get the default decimal point character may not be + available depending on Oracle connection modes, and would therefore raise + an error. The approach to detecting decimal char has been simplified to + test a decimal value directly, instead of reading system views, which + works on any backend / driver. 
+ diff --git a/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/lib/sqlalchemy/dialects/oracle/cx_oracle.py index 20afff656d3..90dabc83b93 100644 --- a/lib/sqlalchemy/dialects/oracle/cx_oracle.py +++ b/lib/sqlalchemy/dialects/oracle/cx_oracle.py @@ -1140,10 +1140,33 @@ def _detect_decimal_char(self, connection): # NLS_TERRITORY or formatting behavior of the DB, we opt # to just look at it - self._decimal_char = connection.exec_driver_sql( - "select value from nls_session_parameters " - "where parameter = 'NLS_NUMERIC_CHARACTERS'" - ).scalar()[0] + dbapi_connection = connection.connection + + with dbapi_connection.cursor() as cursor: + # issue #8744 + # nls_session_parameters is not available in some Oracle + # modes like "mount mode". But then, v$nls_parameters is not + # available if the connection doesn't have SYSDBA priv. + # + # simplify the whole thing and just use the method that we were + # doing in the test suite already, selecting a number + + def output_type_handler( + cursor, name, defaultType, size, precision, scale + ): + return cursor.var( + self.dbapi.STRING, 255, arraysize=cursor.arraysize + ) + + cursor.outputtypehandler = output_type_handler + cursor.execute("SELECT 1.1 FROM DUAL") + value = cursor.fetchone()[0] + + decimal_char = value.lstrip("0")[1] + assert not decimal_char[0].isdigit() + + self._decimal_char = decimal_char + if self._decimal_char != ".": _detect_decimal = self._detect_decimal _to_decimal = self._to_decimal diff --git a/test/dialect/oracle/test_types.py b/test/dialect/oracle/test_types.py index 70b00c06f20..842defb4bab 100644 --- a/test/dialect/oracle/test_types.py +++ b/test/dialect/oracle/test_types.py @@ -1103,62 +1103,67 @@ def _creator(): def teardown_test(self): self.engine.dispose() - def test_were_getting_a_comma(self): - connection = self.engine.pool._creator() - cursor = connection.cursor() - try: - cx_Oracle = self.engine.dialect.dbapi + def test_detection(self): + # revised as of #8744 + with self.engine.connect() as 
conn: + connection = conn.connection - def output_type_handler( - cursor, name, defaultType, size, precision, scale - ): - return cursor.var( - cx_Oracle.STRING, 255, arraysize=cursor.arraysize - ) + with connection.cursor() as cursor: + cx_Oracle = self.engine.dialect.dbapi - cursor.outputtypehandler = output_type_handler - cursor.execute("SELECT 1.1 FROM DUAL") - row = cursor.fetchone() - eq_(row[0], "1,1") - finally: - cursor.close() - connection.close() + def output_type_handler( + cursor, name, defaultType, size, precision, scale + ): + return cursor.var( + cx_Oracle.STRING, 255, arraysize=cursor.arraysize + ) - def test_output_type_handler(self): - with self.engine.connect() as conn: - for stmt, exp, kw in [ - ("SELECT 0.1 FROM DUAL", decimal.Decimal("0.1"), {}), - ("SELECT CAST(15 AS INTEGER) FROM DUAL", 15, {}), - ( - "SELECT CAST(15 AS NUMERIC(3, 1)) FROM DUAL", - decimal.Decimal("15"), - {}, - ), - ( - "SELECT CAST(0.1 AS NUMERIC(5, 2)) FROM DUAL", - decimal.Decimal("0.1"), - {}, - ), - ( - "SELECT :num FROM DUAL", - decimal.Decimal("2.5"), - {"num": decimal.Decimal("2.5")}, - ), - ( - text( - "SELECT CAST(28.532 AS NUMERIC(5, 3)) " - "AS val FROM DUAL" - ).columns(val=Numeric(5, 3, asdecimal=True)), - decimal.Decimal("28.532"), - {}, - ), - ]: - if isinstance(stmt, util.string_types): - test_exp = conn.exec_driver_sql(stmt, kw).scalar() + cursor.outputtypehandler = output_type_handler + cursor.execute("SELECT 1.1 FROM DUAL") + row = cursor.fetchone() + decimal_char = row[0][1] + + if testing.against("+cx_oracle"): + eq_(decimal_char, ",") else: - test_exp = conn.scalar(stmt, **kw) - eq_(test_exp, exp) - assert type(test_exp) is type(exp) + assert decimal_char in ",." 
+ + eq_(conn.dialect._decimal_char, decimal_char) + + @testing.combinations( + ("SELECT 0.1 FROM DUAL", decimal.Decimal("0.1"), {}), + ("SELECT CAST(15 AS INTEGER) FROM DUAL", 15, {}), + ( + "SELECT CAST(15 AS NUMERIC(3, 1)) FROM DUAL", + decimal.Decimal("15"), + {}, + ), + ( + "SELECT CAST(0.1 AS NUMERIC(5, 2)) FROM DUAL", + decimal.Decimal("0.1"), + {}, + ), + ( + "SELECT :num FROM DUAL", + decimal.Decimal("2.5"), + {"num": decimal.Decimal("2.5")}, + ), + ( + text( + "SELECT CAST(28.532 AS NUMERIC(5, 3)) " "AS val FROM DUAL" + ).columns(val=Numeric(5, 3, asdecimal=True)), + decimal.Decimal("28.532"), + {}, + ), + ) + def test_output_type_handler(self, stmt, expected, kw): + with self.engine.connect() as conn: + if isinstance(stmt, str): + test_exp = conn.exec_driver_sql(stmt, kw).scalar() + else: + test_exp = conn.scalar(stmt, **kw) + eq_(test_exp, expected) + assert type(test_exp) is type(expected) class SetInputSizesTest(fixtures.TestBase): From e3df120d45f64561013aa2abeba9014fc8be3395 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 1 Nov 2022 22:57:16 -0400 Subject: [PATCH 419/632] scale back warnings for 1.4's bulk methods since these methods have been improved for 2.0, the general idea is not going away, so remove the warnings indicating that these features are being removed. Change-Id: I2c436c2e7f9aeacc9e71c82af4016190314f04ca --- doc/build/orm/persistence_techniques.rst | 27 +++++++++---- lib/sqlalchemy/orm/session.py | 48 +++++++++--------------- 2 files changed, 37 insertions(+), 38 deletions(-) diff --git a/doc/build/orm/persistence_techniques.rst b/doc/build/orm/persistence_techniques.rst index 09d1948e882..e6d4941f839 100644 --- a/doc/build/orm/persistence_techniques.rst +++ b/doc/build/orm/persistence_techniques.rst @@ -881,18 +881,27 @@ ORM extension. An example of use is at: :ref:`examples_sharding`. Bulk Operations =============== -.. deepalchemy:: Bulk operations are essentially lower-functionality versions +.. 
tip:: + + Bulk operations are essentially lower-functionality versions of the Unit of Work's facilities for emitting INSERT and UPDATE statements on primary key targeted rows. These routines were added to suit some cases where many rows being inserted or updated could be run into the - database without as much of the usual unit of work overhead, in that - most unit of work features are **disabled**. + database without as much of the usual unit of work overhead, by bypassing + a large portion of the functionality that the unit of work provides. + + SQLAlchemy 2.0 features new and improved bulk techniques with clarified + behavior, better integration with ORM objects as well as INSERT/UPDATE/DELETE + statements, and new capabilities. They additionally repair some long lived + performance issues that plagued both regular unit of work and "bulk" routines, + most notably in the area of INSERT operations. + + For these reasons, the previous bulk methods move into legacy status, which + is revised from the original plan that "bulk" features were to be deprecated + entirely. - There is **usually no need to use these routines, and they are not easy - to use as there are many missing behaviors that are usually expected when - using ORM objects**; for efficient - bulk inserts, it's better to use the Core :class:`_sql.Insert` construct - directly. Please read all caveats at :ref:`bulk_operations_caveats`. + When using the legacy 1.4 versions of these features, please read all + caveats at :ref:`bulk_operations_caveats`, as they are not always obvious. .. note:: Bulk INSERT and UPDATE should not be confused with the more common feature known as :ref:`orm_expression_update_delete`. 
This @@ -974,11 +983,13 @@ transaction, like any other:: s = Session() objects = [User(name="u1"), User(name="u2"), User(name="u3")] s.bulk_save_objects(objects) + s.commit() For :meth:`.Session.bulk_insert_mappings`, and :meth:`.Session.bulk_update_mappings`, dictionaries are passed:: s.bulk_insert_mappings(User, [dict(name="u1"), dict(name="u2"), dict(name="u3")]) + s.commit() .. seealso:: diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index 79b723184d3..ac02870d92e 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -3595,23 +3595,19 @@ def bulk_save_objects( as an alternative newer mass-insert features such as :ref:`orm_dml_returning_objects`. - .. warning:: + .. legacy:: The bulk save feature allows for a lower-latency INSERT/UPDATE of rows at the expense of most other unit-of-work features. Features such as object management, relationship handling, - and SQL clause support are **silently omitted** in favor of raw + and SQL clause support are silently omitted in favor of raw INSERT/UPDATES of records. - Please note that newer versions of SQLAlchemy are **greatly - improving the efficiency** of the standard flush process. It is - **strongly recommended** to not use the bulk methods as they - represent a forking of SQLAlchemy's functionality and are slowly - being moved into legacy status. New features such as - :ref:`orm_dml_returning_objects` are both more efficient than - the "bulk" methods and provide more predictable functionality. + In SQLAlchemy 2.0, improved versions of the bulk insert/update + methods are introduced, with clearer behavior and + documentation, new capabilities, and much better performance. - **Please read the list of caveats at** + For 1.4 use, **please read the list of caveats at** :ref:`bulk_operations_caveats` **before using this method, and fully test and confirm the functionality of all code developed using these systems.** @@ -3716,23 +3712,19 @@ def bulk_insert_mappings( .. 
versionadded:: 1.0.0 - .. warning:: + .. legacy:: The bulk insert feature allows for a lower-latency INSERT of rows at the expense of most other unit-of-work features. Features such as object management, relationship handling, - and SQL clause support are **silently omitted** in favor of raw + and SQL clause support are silently omitted in favor of raw INSERT of records. - Please note that newer versions of SQLAlchemy are **greatly - improving the efficiency** of the standard flush process. It is - **strongly recommended** to not use the bulk methods as they - represent a forking of SQLAlchemy's functionality and are slowly - being moved into legacy status. New features such as - :ref:`orm_dml_returning_objects` are both more efficient than - the "bulk" methods and provide more predictable functionality. + In SQLAlchemy 2.0, improved versions of the bulk insert/update + methods are introduced, with clearer behavior and + documentation, new capabilities, and much better performance. - **Please read the list of caveats at** + For 1.4 use, **please read the list of caveats at** :ref:`bulk_operations_caveats` **before using this method, and fully test and confirm the functionality of all code developed using these systems.** @@ -3817,23 +3809,19 @@ def bulk_update_mappings(self, mapper, mappings): .. versionadded:: 1.0.0 - .. warning:: + .. legacy:: The bulk update feature allows for a lower-latency UPDATE of rows at the expense of most other unit-of-work features. Features such as object management, relationship handling, - and SQL clause support are **silently omitted** in favor of raw + and SQL clause support are silently omitted in favor of raw UPDATES of records. - Please note that newer versions of SQLAlchemy are **greatly - improving the efficiency** of the standard flush process. It is - **strongly recommended** to not use the bulk methods as they - represent a forking of SQLAlchemy's functionality and are slowly - being moved into legacy status. 
New features such as - :ref:`orm_dml_returning_objects` are both more efficient than - the "bulk" methods and provide more predictable functionality. + In SQLAlchemy 2.0, improved versions of the bulk insert/update + methods are introduced, with clearer behavior and + documentation, new capabilities, and much better performance. - **Please read the list of caveats at** + For 1.4 use, **please read the list of caveats at** :ref:`bulk_operations_caveats` **before using this method, and fully test and confirm the functionality of all code developed using these systems.** From 55d598e7641e7e5404edac09f470f07ac5247fb4 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 2 Nov 2022 08:39:39 -0400 Subject: [PATCH 420/632] fix event name Change-Id: I26af2326034be07f0ebc91dfbf31d00c40acf585 References: #8717 (cherry picked from commit 7249fa4a51f20c6c62ba94241900600023662bdb) --- lib/sqlalchemy/dialects/postgresql/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index 6820aa60154..c94c7732545 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -274,7 +274,7 @@ def use_identity(element, compiler, **kw): @event.listens_for(postgresql_engine, "reset") - def _reset_mssql(dbapi_connection, connection_record, reset_state): + def _reset_postgresql(dbapi_connection, connection_record, reset_state): dbapi_connection.execute("CLOSE ALL") dbapi_connection.execute("RESET ALL") dbapi_connection.execute("DISCARD TEMP") From a333d1c3a51099447fab47d3d0d03b3b61c268e4 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 1 Nov 2022 15:09:25 -0400 Subject: [PATCH 421/632] soft close cursor for Query direct iterator interrupted Fixed issue where the underlying DBAPI cursor would not be closed when using :class:`_orm.Query` and direct iteration, if a user-defined exception case were raised within the iteration process, 
interrupting the iterator which otherwise is not possible to re-use in this context. When using :meth:`_orm.Query.yield_per` to create server-side cursors, this would lead to the usual MySQL-related issues with server side cursors out of sync. To resolve, a catch for ``GeneratorExit`` is applied within the default iterator, which applies only in those cases where the interpreter is calling ``.close()`` on the iterator in any case. A similar scenario can occur when using :term:`2.x` executions with direct use of :class:`.Result`, in that case the end-user code has access to the :class:`.Result` itself and should call :meth:`.Result.close` directly. Version 2.0 will feature context-manager calling patterns to address this use case. However within the 1.4 scope, ensured that ``.close()`` methods are available on all :class:`.Result` implementations including :class:`.ScalarResult`, :class:`.MappingResult`. Fixes: #8710 Change-Id: I3166328bfd3900957eb33cbf1061d0495c9df670 --- doc/build/changelog/unreleased_14/8710.rst | 30 ++++++ lib/sqlalchemy/engine/result.py | 45 +++++++++ lib/sqlalchemy/orm/query.py | 10 +- test/base/test_result.py | 15 +++ test/orm/test_loading.py | 19 ++++ test/orm/test_query.py | 51 ++++++++++ test/sql/test_resultset.py | 106 +++++++++++++++++++++ 7 files changed, 275 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/8710.rst diff --git a/doc/build/changelog/unreleased_14/8710.rst b/doc/build/changelog/unreleased_14/8710.rst new file mode 100644 index 00000000000..246658896e8 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8710.rst @@ -0,0 +1,30 @@ +.. change:: + :tags: bug, orm + :tickets: 8710 + + Fixed issue where the underlying DBAPI cursor would not be closed when + using :class:`_orm.Query` and direct iteration, if a user-defined exception + case were raised within the iteration process, interrupting the iterator + which otherwise is not possible to re-use in this context. 
When using + :meth:`_orm.Query.yield_per` to create server-side cursors, this would lead + to the usual MySQL-related issues with server side cursors out of sync. + + To resolve, a catch for ``GeneratorExit`` is applied within the default + iterator, which applies only in those cases where the interpreter is + calling ``.close()`` on the iterator in any case. + + A similar scenario can occur when using :term:`2.x` executions with direct + use of :class:`.Result`, in that case the end-user code has access to the + :class:`.Result` itself and should call :meth:`.Result.close` directly. + Version 2.0 will feature context-manager calling patterns to address this + use case. However within the 1.4 scope, ensured that ``.close()`` methods + are available on all :class:`.Result` implementations including + :class:`.ScalarResult`, :class:`.MappingResult`. + +.. change:: + :tags: bug, engine + :tickets: 8710 + + Ensured all :class:`.Result` objects include a :meth:`.Result.close` method + as well as a :attr:`.Result.closed` attribute, including on + :class:`.ScalarResult` and :class:`.MappingResult`. diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py index 1fd4e1c92f2..898d3d88cd5 100644 --- a/lib/sqlalchemy/engine/result.py +++ b/lib/sqlalchemy/engine/result.py @@ -822,6 +822,19 @@ def yield_per(self, num): """ self._yield_per = num + @property + def _soft_closed(self): + raise NotImplementedError() + + @property + def closed(self): + """return True if this :class:`.Result` reports .closed + + .. 
versionadded:: 1.4.43 + + """ + raise NotImplementedError() + @_generative def unique(self, strategy=None): """Apply unique filtering to the objects returned by this @@ -1329,6 +1342,27 @@ def yield_per(self, num): def _soft_close(self, hard=False): self._real_result._soft_close(hard=hard) + @property + def _soft_closed(self): + return self._real_result._soft_closed + + @property + def closed(self): + """return True if the underlying result reports .closed + + .. versionadded:: 1.4.43 + + """ + return self._real_result.closed # type: ignore + + def close(self): + """Close this :class:`.FilterResult`. + + .. versionadded:: 1.4.43 + + """ + self._real_result.close() + @property def _attributes(self): return self._real_result._attributes @@ -1704,6 +1738,7 @@ class IteratorResult(Result): """ _hard_closed = False + _soft_closed = False def __init__( self, @@ -1724,6 +1759,16 @@ def _soft_close(self, hard=False, **kw): self.raw._soft_close(hard=hard, **kw) self.iterator = iter([]) self._reset_memoizations() + self._soft_closed = True + + @property + def closed(self): + """return True if this :class:`.IteratorResult` has been closed + + .. versionadded:: 1.4.43 + + """ + return self._hard_closed def _raise_hard_closed(self): raise exc.ResourceClosedError("This result object is closed.") diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index 99e45914319..65b6bf81a04 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -2897,7 +2897,15 @@ def scalar(self): return None def __iter__(self): - return self._iter().__iter__() + result = self._iter() + try: + for row in result: + yield row + except GeneratorExit: + # issue #8710 - direct iteration is not re-usable after + # an iterable block is broken, so close the result + result._soft_close() + raise def _iter(self): # new style execution. 
diff --git a/test/base/test_result.py b/test/base/test_result.py index 76156db1c38..c7b18fed384 100644 --- a/test/base/test_result.py +++ b/test/base/test_result.py @@ -224,6 +224,21 @@ def _fixture( return res + def test_close_attributes(self): + """test #8710""" + r1 = self._fixture() + + is_false(r1.closed) + is_false(r1._soft_closed) + + r1._soft_close() + is_false(r1.closed) + is_true(r1._soft_closed) + + r1.close() + is_true(r1.closed) + is_true(r1._soft_closed) + def test_class_presented(self): """To support different kinds of objects returned vs. rows, there are two wrapper classes for Result. diff --git a/test/orm/test_loading.py b/test/orm/test_loading.py index cc3c3f49424..d0b5c9d8f9c 100644 --- a/test/orm/test_loading.py +++ b/test/orm/test_loading.py @@ -6,6 +6,7 @@ from sqlalchemy import text from sqlalchemy.orm import loading from sqlalchemy.orm import relationship +from sqlalchemy.testing import is_true from sqlalchemy.testing import mock from sqlalchemy.testing.assertions import assert_raises from sqlalchemy.testing.assertions import assert_raises_message @@ -152,6 +153,24 @@ class InstancesTest(_fixtures.FixtureTest): def setup_mappers(cls): cls._setup_stock_mapping() + def test_cursor_close_exception_raised_in_iteration(self): + """test #8710""" + + User = self.classes.User + s = fixture_session() + + stmt = select(User).execution_options(yield_per=1) + + result = s.execute(stmt) + raw_cursor = result.raw + + for row in result: + with expect_raises_message(Exception, "whoops"): + for row in result: + raise Exception("whoops") + + is_true(raw_cursor._soft_closed) + def test_cursor_close_w_failed_rowproc(self): User = self.classes.User s = fixture_session() diff --git a/test/orm/test_query.py b/test/orm/test_query.py index 9779462a246..2a0eb892656 100644 --- a/test/orm/test_query.py +++ b/test/orm/test_query.py @@ -5390,6 +5390,57 @@ def test_cursor_is_closed_on_exhausted(self, fetch_method): result.close() assert_raises(sa.exc.ResourceClosedError, 
result.all) + def test_yield_per_close_on_interrupted_iteration_legacy(self): + """test #8710""" + + self._eagerload_mappings() + + User = self.classes.User + + asserted_result = [None] + + class _Query(Query): + def _iter(self): + asserted_result[0] = super(_Query, self)._iter() + return asserted_result[0] + + sess = fixture_session(query_cls=_Query) + + with expect_raises_message(Exception, "hi"): + for i, row in enumerate(sess.query(User).yield_per(1)): + assert not asserted_result[0]._soft_closed + assert not asserted_result[0].closed + + if i > 1: + raise Exception("hi") + + assert asserted_result[0]._soft_closed + assert not asserted_result[0].closed + + def test_yield_per_close_on_interrupted_iteration(self): + """test #8710""" + + self._eagerload_mappings() + + User = self.classes.User + + sess = fixture_session() + + with expect_raises_message(Exception, "hi"): + result = sess.execute(select(User).execution_options(yield_per=1)) + for i, row in enumerate(result): + assert not result._soft_closed + assert not result.closed + + if i > 1: + raise Exception("hi") + + assert not result._soft_closed + assert not result.closed + result.close() + assert result._soft_closed + assert result.closed + def test_yield_per_and_execution_options_legacy(self): self._eagerload_mappings() diff --git a/test/sql/test_resultset.py b/test/sql/test_resultset.py index 5d29b0b2b1f..11d58a57a24 100644 --- a/test/sql/test_resultset.py +++ b/test/sql/test_resultset.py @@ -50,6 +50,7 @@ from sqlalchemy.testing import fixtures from sqlalchemy.testing import in_ from sqlalchemy.testing import is_ +from sqlalchemy.testing import is_false from sqlalchemy.testing import is_true from sqlalchemy.testing import le_ from sqlalchemy.testing import mock @@ -1942,6 +1943,70 @@ def test_stream_options( partition = next(result.partitions()) eq_(len(partition), value) + @testing.fixture + def autoclose_row_fixture(self, connection): + users = self.tables.users + connection.execute( + users.insert(), 
+ [ + {"user_id": 1, "name": "u1"}, + {"user_id": 2, "name": "u2"}, + {"user_id": 3, "name": "u3"}, + {"user_id": 4, "name": "u4"}, + {"user_id": 5, "name": "u5"}, + ], + ) + + @testing.fixture(params=["plain", "scalars", "mapping"]) + def result_fixture(self, request, connection): + users = self.tables.users + + result_type = request.param + + if result_type == "plain": + result = connection.execute(select(users)) + elif result_type == "scalars": + result = connection.scalars(select(users)) + elif result_type == "mapping": + result = connection.execute(select(users)).mappings() + else: + assert False + + return result + + def test_results_can_close(self, autoclose_row_fixture, result_fixture): + """test #8710""" + + r1 = result_fixture + + is_false(r1.closed) + is_false(r1._soft_closed) + + r1._soft_close() + is_false(r1.closed) + is_true(r1._soft_closed) + + r1.close() + is_true(r1.closed) + is_true(r1._soft_closed) + + def test_autoclose_rows_exhausted_plain( + self, connection, autoclose_row_fixture, result_fixture + ): + result = result_fixture + + assert not result._soft_closed + assert not result.closed + + read_iterator = list(result) + eq_(len(read_iterator), 5) + + assert result._soft_closed + assert not result.closed + + result.close() + assert result.closed + class KeyTargetingTest(fixtures.TablesTest): run_inserts = "once" @@ -2966,6 +3031,47 @@ def test_buffered_fetchmany_yield_per(self, connection): # buffer of 98, plus buffer of 99 - 89, 10 rows eq_(len(result.cursor_strategy._rowbuffer), 10) + for i, row in enumerate(result): + if i == 206: + break + + eq_(i, 206) + + def test_iterator_remains_unbroken(self, connection): + """test related to #8710. + + demonstrate that we can't close the cursor by catching + GeneratorExit inside of our iteration. Leaving the iterable + block using break, then picking up again, would be directly + impacted by this. So this provides a clear rationale for + providing context manager support for result objects. 
+ + """ + table = self.tables.test + + connection.execute( + table.insert(), + [{"x": i, "y": "t_%d" % i} for i in range(15, 250)], + ) + + result = connection.execute(table.select()) + result = result.yield_per(100) + for i, row in enumerate(result): + if i == 188: + # this will raise GeneratorExit inside the iterator. + # so we can't close the DBAPI cursor here, we have plenty + # more rows to yield + break + + eq_(i, 188) + + # demonstrate getting more rows + for i, row in enumerate(result, 188): + if i == 206: + break + + eq_(i, 206) + @testing.combinations(True, False, argnames="close_on_init") @testing.combinations( "fetchone", "fetchmany", "fetchall", argnames="fetch_style" From b0885d58328137d274a2872aa47d041e1de553d6 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 4 Nov 2022 12:48:43 -0400 Subject: [PATCH 422/632] resolve synonyms in dictionary form of Session.get() Improved "dictionary mode" for :meth:`_orm.Session.get` so that synonym names which refer to primary key attribute names may be indicated in the named dictionary. Fixes: #8753 Change-Id: I56112564a5c23b51b26e01c64087cbf4399cd951 (cherry picked from commit 7b6259c0f3ae411976f8febfe41f2c5fc3490b13) --- doc/build/changelog/unreleased_14/8753.rst | 7 ++++++ lib/sqlalchemy/orm/mapper.py | 18 ++++++++++++++++ lib/sqlalchemy/orm/session.py | 17 ++++++++++++++- test/orm/test_query.py | 25 ++++++++++++++++++++++ 4 files changed, 66 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/8753.rst diff --git a/doc/build/changelog/unreleased_14/8753.rst b/doc/build/changelog/unreleased_14/8753.rst new file mode 100644 index 00000000000..6f898e9a064 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8753.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: bug, orm + :tickets: 8753 + + Improved "dictionary mode" for :meth:`_orm.Session.get` so that synonym + names which refer to primary key attribute names may be indicated in the + named dictionary. 
diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index 97509515b8a..48f9031430f 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -2575,6 +2575,24 @@ class in which it first appeared. dict(self.class_manager._all_sqla_attributes()) ) + @HasMemoized.memoized_attribute + @util.preload_module("sqlalchemy.orm.descriptor_props") + def _pk_synonyms(self): + """return a dictionary of {syn_attribute_name: pk_attr_name} for + all synonyms that refer to primary key columns + + """ + descriptor_props = util.preloaded.orm_descriptor_props + + pk_keys = {prop.key for prop in self._identity_key_props} + + return { + syn.key: syn.name + for k, syn in self._props.items() + if isinstance(syn, descriptor_props.SynonymProperty) + and syn.name in pk_keys + } + @HasMemoized.memoized_attribute @util.preload_module("sqlalchemy.orm.descriptor_props") def synonyms(self): diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index ac02870d92e..7d32362b50e 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -2893,6 +2893,21 @@ def _get_impl( ) if is_dict: + + pk_synonyms = mapper._pk_synonyms + + if pk_synonyms: + correct_keys = set(pk_synonyms).intersection( + primary_key_identity + ) + + if correct_keys: + primary_key_identity = dict(primary_key_identity) + for k in correct_keys: + primary_key_identity[ + pk_synonyms[k] + ] = primary_key_identity[k] + try: primary_key_identity = list( primary_key_identity[prop.key] @@ -2904,7 +2919,7 @@ def _get_impl( sa_exc.InvalidRequestError( "Incorrect names of values in identifier to formulate " "primary key for session.get(); primary key attribute " - "names are %s" + "names are %s (synonym names are also accepted)" % ",".join( "'%s'" % prop.key for prop in mapper._identity_key_props diff --git a/test/orm/test_query.py b/test/orm/test_query.py index 2a0eb892656..a7fbf069ecf 100644 --- a/test/orm/test_query.py +++ b/test/orm/test_query.py @@ 
-1210,6 +1210,31 @@ def test_get(self): u2 = s.get(User, 7) assert u is not u2 + def test_get_synonym_direct_name(self, decl_base): + """test #8753""" + + class MyUser(decl_base): + __table__ = self.tables.users + + syn_id = synonym("id") + + s = fixture_session() + u = s.get(MyUser, {"syn_id": 7}) + eq_(u.id, 7) + + def test_get_synonym_indirect(self, decl_base): + """test #8753""" + + class MyUser(decl_base): + __table__ = self.tables.users + + uid = __table__.c.id + syn_id = synonym("uid") + + s = fixture_session() + u = s.get(MyUser, {"syn_id": 7}) + eq_(u.uid, 7) + def test_get_composite_pk_no_result(self): CompositePk = self.classes.CompositePk From e9fac75ebb5f011d8e5e9824cfac8eabddd86d3a Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 4 Nov 2022 16:16:48 -0400 Subject: [PATCH 423/632] changelog updates as this release cycle was fairly frenetic, a lot of these changelogs were very poorly worded (by me). Change-Id: Idb796cf3e25975fb2f75bacf26f1cb57ef0e4cad (cherry picked from commit 751469240a1f2908d16ca2b087b5dac55dbdcb47) --- doc/build/changelog/unreleased_14/8700.rst | 8 +++--- doc/build/changelog/unreleased_14/8704.rst | 2 +- doc/build/changelog/unreleased_14/8710.rst | 29 +++++++++++---------- doc/build/changelog/unreleased_14/8711.rst | 11 ++++---- doc/build/changelog/unreleased_14/8714.rst | 8 +++--- doc/build/changelog/unreleased_14/8717.rst | 30 ++++++++++++---------- doc/build/changelog/unreleased_14/8721.rst | 21 +++++++-------- doc/build/changelog/unreleased_14/8738.rst | 7 ++--- 8 files changed, 60 insertions(+), 56 deletions(-) diff --git a/doc/build/changelog/unreleased_14/8700.rst b/doc/build/changelog/unreleased_14/8700.rst index 205f251ef40..0805c233843 100644 --- a/doc/build/changelog/unreleased_14/8700.rst +++ b/doc/build/changelog/unreleased_14/8700.rst @@ -2,9 +2,9 @@ :tags: bug, mssql, reflection :tickets: 8700 - Fixed issue with :meth:`.Inspector.has_table` when used against a view for - the SQL Server dialect would erroneously 
return ``False``, due to a - regression in the 1.4 series which removed support for this on SQL Server. - The issue is not present in the 2.0 series which uses a different + Fixed issue with :meth:`.Inspector.has_table`, which when used against a + view with the SQL Server dialect would erroneously return ``False``, due to + a regression in the 1.4 series which removed support for this on SQL + Server. The issue is not present in the 2.0 series which uses a different reflection architecture. Test support is added to ensure ``has_table()`` remains working per spec re: views. diff --git a/doc/build/changelog/unreleased_14/8704.rst b/doc/build/changelog/unreleased_14/8704.rst index 7327c95313e..90d97287062 100644 --- a/doc/build/changelog/unreleased_14/8704.rst +++ b/doc/build/changelog/unreleased_14/8704.rst @@ -3,6 +3,6 @@ :tickets: 8704 Fixed issue where "selectin_polymorphic" loading for inheritance mappers - would not function correctly if the :param:`_orm.Mapper.polymorphic_on` + would not function correctly if the :paramref:`_orm.Mapper.polymorphic_on` parameter referred to a SQL expression that was not directly mapped on the class. diff --git a/doc/build/changelog/unreleased_14/8710.rst b/doc/build/changelog/unreleased_14/8710.rst index 246658896e8..4d4597d7f85 100644 --- a/doc/build/changelog/unreleased_14/8710.rst +++ b/doc/build/changelog/unreleased_14/8710.rst @@ -3,23 +3,24 @@ :tickets: 8710 Fixed issue where the underlying DBAPI cursor would not be closed when - using :class:`_orm.Query` and direct iteration, if a user-defined exception - case were raised within the iteration process, interrupting the iterator - which otherwise is not possible to re-use in this context. When using + using the :class:`_orm.Query` object as an iterator, if a user-defined exception + case were raised within the iteration process, thereby causing the iterator + to be closed by the Python interpreter. 
When using :meth:`_orm.Query.yield_per` to create server-side cursors, this would lead - to the usual MySQL-related issues with server side cursors out of sync. + to the usual MySQL-related issues with server side cursors out of sync, + and without direct access to the :class:`.Result` object, end-user code + could not access the cursor in order to close it. - To resolve, a catch for ``GeneratorExit`` is applied within the default - iterator, which applies only in those cases where the interpreter is - calling ``.close()`` on the iterator in any case. + To resolve, a catch for ``GeneratorExit`` is applied within the iterator + method, which will close the result object in those cases when the + iterator were interrupted, and by definition will be closed by the + Python interpreter. - A similar scenario can occur when using :term:`2.x` executions with direct - use of :class:`.Result`, in that case the end-user code has access to the - :class:`.Result` itself and should call :meth:`.Result.close` directly. - Version 2.0 will feature context-manager calling patterns to address this - use case. However within the 1.4 scope, ensured that ``.close()`` methods - are available on all :class:`.Result` implementations including - :class:`.ScalarResult`, :class:`.MappingResult`. + As part of this change as implemented for the 1.4 series, ensured that + ``.close()`` methods are available on all :class:`.Result` implementations + including :class:`.ScalarResult`, :class:`.MappingResult`. The 2.0 + version of this change also includes new context manager patterns for use + with :class:`.Result` classes. .. 
change:: :tags: bug, engine diff --git a/doc/build/changelog/unreleased_14/8711.rst b/doc/build/changelog/unreleased_14/8711.rst index 82e68bbc439..cc76eb461cd 100644 --- a/doc/build/changelog/unreleased_14/8711.rst +++ b/doc/build/changelog/unreleased_14/8711.rst @@ -2,8 +2,9 @@ :tags: bug, orm :tickets: 8711 - Fixed the exception that's raised when the - :func:`_orm.with_loader_criteria` option is attempted to be used within a - specific loader path, like in loader.options(). - :func:`_orm.with_loader_criteria` is only intended to be used at the top - level. + An informative exception is now raised when the + :func:`_orm.with_loader_criteria` option is used as a loader option added + to a specific "loader path", such as when using it within + :meth:`.Load.options`. This use is not supported as + :func:`_orm.with_loader_criteria` is only intended to be used as a top + level loader option. Previously, an internal error would be generated. diff --git a/doc/build/changelog/unreleased_14/8714.rst b/doc/build/changelog/unreleased_14/8714.rst index d75f2570edc..6fd133a091a 100644 --- a/doc/build/changelog/unreleased_14/8714.rst +++ b/doc/build/changelog/unreleased_14/8714.rst @@ -2,7 +2,7 @@ :tags: bug, mssql :tickets: 8714 - Fixed issue with :meth:`.Inspector.has_table` when used against a temporary - table for the SQL Server dialect would fail an invalid object name error on - some Azure variants, due to an unnecessary information schema query that is - not supported on those server versions. Pull request courtesy Mike Barry. + Fixed issue with :meth:`.Inspector.has_table`, which when used against a + temporary table with the SQL Server dialect would fail on some Azure + variants, due to an unnecessary information schema query that is not + supported on those server versions. Pull request courtesy Mike Barry. 
diff --git a/doc/build/changelog/unreleased_14/8717.rst b/doc/build/changelog/unreleased_14/8717.rst index 4f3c5cd472f..dba7b830c6f 100644 --- a/doc/build/changelog/unreleased_14/8717.rst +++ b/doc/build/changelog/unreleased_14/8717.rst @@ -3,17 +3,21 @@ :tickets: 8717 Fixed issue where the :meth:`.PoolEvents.reset` event hook would not be - called when a :class:`.Connection` were closed which already called - ``.rollback()`` on its own transaction, due to an enhancement in the 1.4 - series that ensures ``.rollback()`` is only called once in this scenario, - rather than twice. This would prevent custom pool reset schemes from being - used within this hook. This was a regression that appeared in version 1.4. - - For version 1.4, the :meth:`.PoolEvents.checkin` likely remains a better - event to use for custom "reset" implementations. Version 2.0 will feature - an improved version of :meth:`.PoolEvents.reset` which is called for - additional scenarios such as termination of asyncio connections, and is - also passed contextual information about the reset, to allow for "custom - connection reset" schemes which can respond to different reset scenarios in - different ways. + be called in all cases when a :class:`.Connection` were closed and was + in the process of returning its DBAPI connection to the connection pool. + The scenario was when the :class:`.Connection` had already emitted + ``.rollback()`` on its DBAPI connection within the process of returning + the connection to the pool, where it would then instruct the connection + pool to forego doing its own "reset" to save on the additional method + call. However, this prevented custom pool reset schemes from being + used within this hook, as such hooks by definition are doing more than + just calling ``.rollback()``, and need to be invoked under all + circumstances This was a regression that appeared in version 1.4. 
+ For version 1.4, the :meth:`.PoolEvents.checkin` remains viable as an + alternate event hook to use for custom "reset" implementations. Version 2.0 + will feature an improved version of :meth:`.PoolEvents.reset` which is + called for additional scenarios such as termination of asyncio connections, + and is also passed contextual information about the reset, to allow for + "custom connection reset" schemes which can respond to different reset + scenarios in different ways. diff --git a/doc/build/changelog/unreleased_14/8721.rst b/doc/build/changelog/unreleased_14/8721.rst index e6d7f4bf4cc..9e62e66b53e 100644 --- a/doc/build/changelog/unreleased_14/8721.rst +++ b/doc/build/changelog/unreleased_14/8721.rst @@ -2,16 +2,13 @@ :tags: bug, orm :tickets: 8721 - Fixed bug involving :class:`.Select` constructs which used a combination of - :meth:`.Select.select_from` with an ORM entity followed by - :meth:`.Select.join` against the entity sent in - :meth:`.Select.select_from`, as well as using plain - :meth:`.Select.join_from`, which when combined with a columns clause that - didn't explicitly include that entity would then cause "automatic WHERE - criteria" features such as the IN expression required for a single-table - inheritance subclass, as well as the criteria set up by the - :func:`_orm.with_loader_criteria` option, to not be rendered for that - entity. The correct entity is now transferred to the :class:`.Join` object - that's generated internally, so that the criteria against the left - side entity is correctly added. + Fixed bug involving :class:`.Select` constructs, where combinations of + :meth:`.Select.select_from` with :meth:`.Select.join`, as well as when + using :meth:`.Select.join_from`, would cause the + :func:`_orm.with_loader_criteria` feature as well as the IN criteria needed + for single-table inheritance queries to not render, in cases where the + columns clause of the query did not explicitly include the left-hand side + entity of the JOIN. 
The correct entity is now transferred to the + :class:`.Join` object that's generated internally, so that the criteria + against the left side entity is correctly added. diff --git a/doc/build/changelog/unreleased_14/8738.rst b/doc/build/changelog/unreleased_14/8738.rst index 2372c25afe1..fb7b31ac34c 100644 --- a/doc/build/changelog/unreleased_14/8738.rst +++ b/doc/build/changelog/unreleased_14/8738.rst @@ -1,8 +1,9 @@ .. change:: :tags: bug, orm - :tickets: 8731 + :tickets: 8738 Fixed issue in joined eager loading where an assertion fail would occur - with a particular combination of outer/inner joined eager loads in - conjunction with an inherited subclass mapper as the middle target. + with a particular combination of outer/inner joined eager loads, when + eager loading across three mappers where the middle mapper was + an inherited subclass mapper. From 14cfef495dddb4faea2e4506a00afc9c0f1c01f5 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 4 Nov 2022 16:44:29 -0400 Subject: [PATCH 424/632] fix doc symbols for 1.4, these are ambiguous due to the presence of future names Change-Id: I3694e75da73fe20c19445007c8823030789fc6c0 --- doc/build/changelog/unreleased_14/8717.rst | 20 ++++++++++---------- doc/build/core/defaults.rst | 11 ++++++----- doc/build/core/pooling.rst | 2 +- 3 files changed, 17 insertions(+), 16 deletions(-) diff --git a/doc/build/changelog/unreleased_14/8717.rst b/doc/build/changelog/unreleased_14/8717.rst index dba7b830c6f..10c7ceec84c 100644 --- a/doc/build/changelog/unreleased_14/8717.rst +++ b/doc/build/changelog/unreleased_14/8717.rst @@ -2,17 +2,17 @@ :tags: bug, engine, regression :tickets: 8717 - Fixed issue where the :meth:`.PoolEvents.reset` event hook would not be - be called in all cases when a :class:`.Connection` were closed and was + Fixed issue where the :meth:`.PoolEvents.reset` event hook would not be be + called in all cases when a :class:`_engine.Connection` were closed and was in the process of returning its DBAPI 
connection to the connection pool. - The scenario was when the :class:`.Connection` had already emitted - ``.rollback()`` on its DBAPI connection within the process of returning - the connection to the pool, where it would then instruct the connection - pool to forego doing its own "reset" to save on the additional method - call. However, this prevented custom pool reset schemes from being - used within this hook, as such hooks by definition are doing more than - just calling ``.rollback()``, and need to be invoked under all - circumstances This was a regression that appeared in version 1.4. + The scenario was when the :class:`_engine.Connection` had already emitted + ``.rollback()`` on its DBAPI connection within the process of returning the + connection to the pool, where it would then instruct the connection pool to + forego doing its own "reset" to save on the additional method call. + However, this prevented custom pool reset schemes from being used within + this hook, as such hooks by definition are doing more than just calling + ``.rollback()``, and need to be invoked under all circumstances This was a + regression that appeared in version 1.4. For version 1.4, the :meth:`.PoolEvents.checkin` remains viable as an alternate event hook to use for custom "reset" implementations. Version 2.0 diff --git a/doc/build/core/defaults.rst b/doc/build/core/defaults.rst index c875808ec70..ca78e3aa046 100644 --- a/doc/build/core/defaults.rst +++ b/doc/build/core/defaults.rst @@ -403,7 +403,7 @@ table will include: :paramref:`.MetaData.schema` setting on the :class:`.MetaData` in use; see :ref:`sequence_metadata` for background. -When :class:`.Insert` DML constructs are invoked against the ``cartitems`` +When :class:`_dml .Insert` DML constructs are invoked against the ``cartitems`` table, without an explicit value passed for the ``cart_id`` column, the ``cart_id_seq`` sequence will be used to generate a value on participating backends. 
Typically, the sequence function is embedded in the INSERT statement, @@ -416,10 +416,11 @@ returned to the Python process: VALUES (next_val(cart_id_seq), 'some description', '2015-10-15 12:00:15') RETURNING cart_id -When using :meth:`.Connection.execute` to invoke an :class:`.Insert` construct, -newly generated primary key identifiers, including but not limited to those -generated using :class:`.Sequence`, are available from the :class:`.CursorResult` -construct using the :attr:`.CursorResult.inserted_primary_key` attribute. +When using :meth:`_engine.Connection.execute` to invoke an :class:`_dml.Insert` +construct, newly generated primary key identifiers, including but not limited +to those generated using :class:`.Sequence`, are available from the +:class:`.CursorResult` construct using the +:attr:`.CursorResult.inserted_primary_key` attribute. When the :class:`~sqlalchemy.schema.Sequence` is associated with a :class:`_schema.Column` as its **Python-side** default generator, the diff --git a/doc/build/core/pooling.rst b/doc/build/core/pooling.rst index c147b1d0b7d..a93b9477f8d 100644 --- a/doc/build/core/pooling.rst +++ b/doc/build/core/pooling.rst @@ -153,7 +153,7 @@ performance reasons. This can be affected by using the is also available from :func:`_sa.create_engine` as :paramref:`_sa.create_engine.pool_reset_on_return`, passing a value of ``None``. 
This is illustrated in the example below, in conjunction with the -:paramref:`.create_engine.isolation_level` parameter setting of +:paramref:`_sa.create_engine.isolation_level` parameter setting of ``AUTOCOMMIT``:: non_acid_engine = create_engine( From 1e08911d5c6f09bcf41f1c8f2628006c21ce979c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 4 Nov 2022 17:00:29 -0400 Subject: [PATCH 425/632] typo Change-Id: I41cb128767c0cba48255cca0904ae1bff1b357ac (cherry picked from commit 6410e372cb344084ee13942bd5b9eb78b24fc50c) --- doc/build/changelog/unreleased_14/8717.rst | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/doc/build/changelog/unreleased_14/8717.rst b/doc/build/changelog/unreleased_14/8717.rst index 10c7ceec84c..676f2e21525 100644 --- a/doc/build/changelog/unreleased_14/8717.rst +++ b/doc/build/changelog/unreleased_14/8717.rst @@ -5,14 +5,15 @@ Fixed issue where the :meth:`.PoolEvents.reset` event hook would not be be called in all cases when a :class:`_engine.Connection` were closed and was in the process of returning its DBAPI connection to the connection pool. + The scenario was when the :class:`_engine.Connection` had already emitted - ``.rollback()`` on its DBAPI connection within the process of returning the - connection to the pool, where it would then instruct the connection pool to - forego doing its own "reset" to save on the additional method call. - However, this prevented custom pool reset schemes from being used within - this hook, as such hooks by definition are doing more than just calling - ``.rollback()``, and need to be invoked under all circumstances This was a - regression that appeared in version 1.4. + ``.rollback()`` on its DBAPI connection within the process of returning + the connection to the pool, where it would then instruct the connection + pool to forego doing its own "reset" to save on the additional method + call. 
However, this prevented custom pool reset schemes from being + used within this hook, as such hooks by definition are doing more than + just calling ``.rollback()``, and need to be invoked under all + circumstances. This was a regression that appeared in version 1.4. For version 1.4, the :meth:`.PoolEvents.checkin` remains viable as an alternate event hook to use for custom "reset" implementations. Version 2.0 From 274ecb746d559aa9b9e9c788544e8856807ab192 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 4 Nov 2022 17:04:01 -0400 Subject: [PATCH 426/632] - 1.4.43 --- doc/build/changelog/changelog_14.rst | 171 ++++++++++++++++++++- doc/build/changelog/unreleased_14/8700.rst | 10 -- doc/build/changelog/unreleased_14/8704.rst | 8 - doc/build/changelog/unreleased_14/8708.rst | 14 -- doc/build/changelog/unreleased_14/8710.rst | 31 ---- doc/build/changelog/unreleased_14/8711.rst | 10 -- doc/build/changelog/unreleased_14/8714.rst | 8 - doc/build/changelog/unreleased_14/8717.rst | 24 --- doc/build/changelog/unreleased_14/8721.rst | 14 -- doc/build/changelog/unreleased_14/8724.rst | 11 -- doc/build/changelog/unreleased_14/8738.rst | 9 -- doc/build/changelog/unreleased_14/8744.rst | 11 -- doc/build/changelog/unreleased_14/8753.rst | 7 - doc/build/conf.py | 4 +- 14 files changed, 172 insertions(+), 160 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/8700.rst delete mode 100644 doc/build/changelog/unreleased_14/8704.rst delete mode 100644 doc/build/changelog/unreleased_14/8708.rst delete mode 100644 doc/build/changelog/unreleased_14/8710.rst delete mode 100644 doc/build/changelog/unreleased_14/8711.rst delete mode 100644 doc/build/changelog/unreleased_14/8714.rst delete mode 100644 doc/build/changelog/unreleased_14/8717.rst delete mode 100644 doc/build/changelog/unreleased_14/8721.rst delete mode 100644 doc/build/changelog/unreleased_14/8724.rst delete mode 100644 doc/build/changelog/unreleased_14/8738.rst delete mode 100644 
doc/build/changelog/unreleased_14/8744.rst delete mode 100644 doc/build/changelog/unreleased_14/8753.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 3b19d78226a..516694dfe11 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,176 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.43 - :include_notes_from: unreleased_14 + :released: November 4, 2022 + + .. change:: + :tags: bug, orm + :tickets: 8738 + + Fixed issue in joined eager loading where an assertion fail would occur + with a particular combination of outer/inner joined eager loads, when + eager loading across three mappers where the middle mapper was + an inherited subclass mapper. + + + .. change:: + :tags: bug, oracle + :tickets: 8708 + + Fixed issue where bound parameter names, including those automatically + derived from similarly-named database columns, which contained characters + that normally require quoting with Oracle would not be escaped when using + "expanding parameters" with the Oracle dialect, causing execution errors. + The usual "quoting" for bound parameters used by the Oracle dialect is not + used with the "expanding parameters" architecture, so escaping for a large + range of characters is used instead, now using a list of characters/escapes + that are specific to Oracle. + + + + .. change:: + :tags: bug, orm + :tickets: 8721 + + Fixed bug involving :class:`.Select` constructs, where combinations of + :meth:`.Select.select_from` with :meth:`.Select.join`, as well as when + using :meth:`.Select.join_from`, would cause the + :func:`_orm.with_loader_criteria` feature as well as the IN criteria needed + for single-table inheritance queries to not render, in cases where the + columns clause of the query did not explicitly include the left-hand side + entity of the JOIN. 
The correct entity is now transferred to the + :class:`.Join` object that's generated internally, so that the criteria + against the left side entity is correctly added. + + + .. change:: + :tags: bug, mssql + :tickets: 8714 + + Fixed issue with :meth:`.Inspector.has_table`, which when used against a + temporary table with the SQL Server dialect would fail on some Azure + variants, due to an unnecessary information schema query that is not + supported on those server versions. Pull request courtesy Mike Barry. + + .. change:: + :tags: bug, orm + :tickets: 8711 + + An informative exception is now raised when the + :func:`_orm.with_loader_criteria` option is used as a loader option added + to a specific "loader path", such as when using it within + :meth:`.Load.options`. This use is not supported as + :func:`_orm.with_loader_criteria` is only intended to be used as a top + level loader option. Previously, an internal error would be generated. + + .. change:: + :tags: bug, oracle + :tickets: 8744 + + Fixed issue where the ``nls_session_parameters`` view queried on first + connect in order to get the default decimal point character may not be + available depending on Oracle connection modes, and would therefore raise + an error. The approach to detecting decimal char has been simplified to + test a decimal value directly, instead of reading system views, which + works on any backend / driver. + + + .. change:: + :tags: bug, orm + :tickets: 8753 + + Improved "dictionary mode" for :meth:`_orm.Session.get` so that synonym + names which refer to primary key attribute names may be indicated in the + named dictionary. + + .. change:: + :tags: bug, engine, regression + :tickets: 8717 + + Fixed issue where the :meth:`.PoolEvents.reset` event hook would not be be + called in all cases when a :class:`_engine.Connection` were closed and was + in the process of returning its DBAPI connection to the connection pool. 
+ + The scenario was when the :class:`_engine.Connection` had already emitted + ``.rollback()`` on its DBAPI connection within the process of returning + the connection to the pool, where it would then instruct the connection + pool to forego doing its own "reset" to save on the additional method + call. However, this prevented custom pool reset schemes from being + used within this hook, as such hooks by definition are doing more than + just calling ``.rollback()``, and need to be invoked under all + circumstances. This was a regression that appeared in version 1.4. + + For version 1.4, the :meth:`.PoolEvents.checkin` remains viable as an + alternate event hook to use for custom "reset" implementations. Version 2.0 + will feature an improved version of :meth:`.PoolEvents.reset` which is + called for additional scenarios such as termination of asyncio connections, + and is also passed contextual information about the reset, to allow for + "custom connection reset" schemes which can respond to different reset + scenarios in different ways. + + .. change:: + :tags: bug, orm + :tickets: 8704 + + Fixed issue where "selectin_polymorphic" loading for inheritance mappers + would not function correctly if the :paramref:`_orm.Mapper.polymorphic_on` + parameter referred to a SQL expression that was not directly mapped on the + class. + + .. change:: + :tags: bug, orm + :tickets: 8710 + + Fixed issue where the underlying DBAPI cursor would not be closed when + using the :class:`_orm.Query` object as an iterator, if a user-defined exception + case were raised within the iteration process, thereby causing the iterator + to be closed by the Python interpreter. When using + :meth:`_orm.Query.yield_per` to create server-side cursors, this would lead + to the usual MySQL-related issues with server side cursors out of sync, + and without direct access to the :class:`.Result` object, end-user code + could not access the cursor in order to close it. 
+ + To resolve, a catch for ``GeneratorExit`` is applied within the iterator + method, which will close the result object in those cases when the + iterator were interrupted, and by definition will be closed by the + Python interpreter. + + As part of this change as implemented for the 1.4 series, ensured that + ``.close()`` methods are available on all :class:`.Result` implementations + including :class:`.ScalarResult`, :class:`.MappingResult`. The 2.0 + version of this change also includes new context manager patterns for use + with :class:`.Result` classes. + + .. change:: + :tags: bug, engine + :tickets: 8710 + + Ensured all :class:`.Result` objects include a :meth:`.Result.close` method + as well as a :attr:`.Result.closed` attribute, including on + :class:`.ScalarResult` and :class:`.MappingResult`. + + .. change:: + :tags: bug, mssql, reflection + :tickets: 8700 + + Fixed issue with :meth:`.Inspector.has_table`, which when used against a + view with the SQL Server dialect would erroneously return ``False``, due to + a regression in the 1.4 series which removed support for this on SQL + Server. The issue is not present in the 2.0 series which uses a different + reflection architecture. Test support is added to ensure ``has_table()`` + remains working per spec re: views. + + .. change:: + :tags: bug, sql + :tickets: 8724 + + Fixed issue which prevented the :func:`_sql.literal_column` construct from + working properly within the context of a :class:`.Select` construct as well + as other potential places where "anonymized labels" might be generated, if + the literal expression contained characters which could interfere with + format strings, such as open parenthesis, due to an implementation detail + of the "anonymous label" structure. + .. 
changelog:: :version: 1.4.42 diff --git a/doc/build/changelog/unreleased_14/8700.rst b/doc/build/changelog/unreleased_14/8700.rst deleted file mode 100644 index 0805c233843..00000000000 --- a/doc/build/changelog/unreleased_14/8700.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. change:: - :tags: bug, mssql, reflection - :tickets: 8700 - - Fixed issue with :meth:`.Inspector.has_table`, which when used against a - view with the SQL Server dialect would erroneously return ``False``, due to - a regression in the 1.4 series which removed support for this on SQL - Server. The issue is not present in the 2.0 series which uses a different - reflection architecture. Test support is added to ensure ``has_table()`` - remains working per spec re: views. diff --git a/doc/build/changelog/unreleased_14/8704.rst b/doc/build/changelog/unreleased_14/8704.rst deleted file mode 100644 index 90d97287062..00000000000 --- a/doc/build/changelog/unreleased_14/8704.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 8704 - - Fixed issue where "selectin_polymorphic" loading for inheritance mappers - would not function correctly if the :paramref:`_orm.Mapper.polymorphic_on` - parameter referred to a SQL expression that was not directly mapped on the - class. diff --git a/doc/build/changelog/unreleased_14/8708.rst b/doc/build/changelog/unreleased_14/8708.rst deleted file mode 100644 index bb7424faaf4..00000000000 --- a/doc/build/changelog/unreleased_14/8708.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. change:: - :tags: bug, oracle - :tickets: 8708 - - Fixed issue where bound parameter names, including those automatically - derived from similarly-named database columns, which contained characters - that normally require quoting with Oracle would not be escaped when using - "expanding parameters" with the Oracle dialect, causing execution errors. 
- The usual "quoting" for bound parameters used by the Oracle dialect is not - used with the "expanding parameters" architecture, so escaping for a large - range of characters is used instead, now using a list of characters/escapes - that are specific to Oracle. - - diff --git a/doc/build/changelog/unreleased_14/8710.rst b/doc/build/changelog/unreleased_14/8710.rst deleted file mode 100644 index 4d4597d7f85..00000000000 --- a/doc/build/changelog/unreleased_14/8710.rst +++ /dev/null @@ -1,31 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 8710 - - Fixed issue where the underlying DBAPI cursor would not be closed when - using the :class:`_orm.Query` object as an iterator, if a user-defined exception - case were raised within the iteration process, thereby causing the iterator - to be closed by the Python interpreter. When using - :meth:`_orm.Query.yield_per` to create server-side cursors, this would lead - to the usual MySQL-related issues with server side cursors out of sync, - and without direct access to the :class:`.Result` object, end-user code - could not access the cursor in order to close it. - - To resolve, a catch for ``GeneratorExit`` is applied within the iterator - method, which will close the result object in those cases when the - iterator were interrupted, and by definition will be closed by the - Python interpreter. - - As part of this change as implemented for the 1.4 series, ensured that - ``.close()`` methods are available on all :class:`.Result` implementations - including :class:`.ScalarResult`, :class:`.MappingResult`. The 2.0 - version of this change also includes new context manager patterns for use - with :class:`.Result` classes. - -.. change:: - :tags: bug, engine - :tickets: 8710 - - Ensured all :class:`.Result` objects include a :meth:`.Result.close` method - as well as a :attr:`.Result.closed` attribute, including on - :class:`.ScalarResult` and :class:`.MappingResult`. 
diff --git a/doc/build/changelog/unreleased_14/8711.rst b/doc/build/changelog/unreleased_14/8711.rst deleted file mode 100644 index cc76eb461cd..00000000000 --- a/doc/build/changelog/unreleased_14/8711.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 8711 - - An informative exception is now raised when the - :func:`_orm.with_loader_criteria` option is used as a loader option added - to a specific "loader path", such as when using it within - :meth:`.Load.options`. This use is not supported as - :func:`_orm.with_loader_criteria` is only intended to be used as a top - level loader option. Previously, an internal error would be generated. diff --git a/doc/build/changelog/unreleased_14/8714.rst b/doc/build/changelog/unreleased_14/8714.rst deleted file mode 100644 index 6fd133a091a..00000000000 --- a/doc/build/changelog/unreleased_14/8714.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, mssql - :tickets: 8714 - - Fixed issue with :meth:`.Inspector.has_table`, which when used against a - temporary table with the SQL Server dialect would fail on some Azure - variants, due to an unnecessary information schema query that is not - supported on those server versions. Pull request courtesy Mike Barry. diff --git a/doc/build/changelog/unreleased_14/8717.rst b/doc/build/changelog/unreleased_14/8717.rst deleted file mode 100644 index 676f2e21525..00000000000 --- a/doc/build/changelog/unreleased_14/8717.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. change:: - :tags: bug, engine, regression - :tickets: 8717 - - Fixed issue where the :meth:`.PoolEvents.reset` event hook would not be be - called in all cases when a :class:`_engine.Connection` were closed and was - in the process of returning its DBAPI connection to the connection pool. 
- - The scenario was when the :class:`_engine.Connection` had already emitted - ``.rollback()`` on its DBAPI connection within the process of returning - the connection to the pool, where it would then instruct the connection - pool to forego doing its own "reset" to save on the additional method - call. However, this prevented custom pool reset schemes from being - used within this hook, as such hooks by definition are doing more than - just calling ``.rollback()``, and need to be invoked under all - circumstances. This was a regression that appeared in version 1.4. - - For version 1.4, the :meth:`.PoolEvents.checkin` remains viable as an - alternate event hook to use for custom "reset" implementations. Version 2.0 - will feature an improved version of :meth:`.PoolEvents.reset` which is - called for additional scenarios such as termination of asyncio connections, - and is also passed contextual information about the reset, to allow for - "custom connection reset" schemes which can respond to different reset - scenarios in different ways. diff --git a/doc/build/changelog/unreleased_14/8721.rst b/doc/build/changelog/unreleased_14/8721.rst deleted file mode 100644 index 9e62e66b53e..00000000000 --- a/doc/build/changelog/unreleased_14/8721.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 8721 - - Fixed bug involving :class:`.Select` constructs, where combinations of - :meth:`.Select.select_from` with :meth:`.Select.join`, as well as when - using :meth:`.Select.join_from`, would cause the - :func:`_orm.with_loader_criteria` feature as well as the IN criteria needed - for single-table inheritance queries to not render, in cases where the - columns clause of the query did not explicitly include the left-hand side - entity of the JOIN. The correct entity is now transferred to the - :class:`.Join` object that's generated internally, so that the criteria - against the left side entity is correctly added. 
- diff --git a/doc/build/changelog/unreleased_14/8724.rst b/doc/build/changelog/unreleased_14/8724.rst deleted file mode 100644 index 8329697ceec..00000000000 --- a/doc/build/changelog/unreleased_14/8724.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 8724 - - Fixed issue which prevented the :func:`_sql.literal_column` construct from - working properly within the context of a :class:`.Select` construct as well - as other potential places where "anonymized labels" might be generated, if - the literal expression contained characters which could interfere with - format strings, such as open parenthesis, due to an implementation detail - of the "anonymous label" structure. - diff --git a/doc/build/changelog/unreleased_14/8738.rst b/doc/build/changelog/unreleased_14/8738.rst deleted file mode 100644 index fb7b31ac34c..00000000000 --- a/doc/build/changelog/unreleased_14/8738.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 8738 - - Fixed issue in joined eager loading where an assertion fail would occur - with a particular combination of outer/inner joined eager loads, when - eager loading across three mappers where the middle mapper was - an inherited subclass mapper. - diff --git a/doc/build/changelog/unreleased_14/8744.rst b/doc/build/changelog/unreleased_14/8744.rst deleted file mode 100644 index 6d24f0ff930..00000000000 --- a/doc/build/changelog/unreleased_14/8744.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. change:: - :tags: bug, oracle - :tickets: 8744 - - Fixed issue where the ``nls_session_parameters`` view queried on first - connect in order to get the default decimal point character may not be - available depending on Oracle connection modes, and would therefore raise - an error. The approach to detecting decimal char has been simplified to - test a decimal value directly, instead of reading system views, which - works on any backend / driver. 
- diff --git a/doc/build/changelog/unreleased_14/8753.rst b/doc/build/changelog/unreleased_14/8753.rst deleted file mode 100644 index 6f898e9a064..00000000000 --- a/doc/build/changelog/unreleased_14/8753.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 8753 - - Improved "dictionary mode" for :meth:`_orm.Session.get` so that synonym - names which refer to primary key attribute names may be indicated in the - named dictionary. diff --git a/doc/build/conf.py b/doc/build/conf.py index 86f1925303c..4f9550225bd 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -213,9 +213,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. -release = "1.4.42" +release = "1.4.43" -release_date = "October 16, 2022" +release_date = "November 4, 2022" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From 41d33e1a30bfcdd5ddefc1f244863984aa13c4e3 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 4 Nov 2022 17:08:40 -0400 Subject: [PATCH 427/632] Version 1.4.44 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 516694dfe11..69353f9c5ab 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.44 + :include_notes_from: unreleased_14 + .. 
changelog:: :version: 1.4.43 :released: November 4, 2022 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index 2714b994620..9449096d985 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.43" +__version__ = "1.4.44" def __go(lcls): From 24c89a7c57484b48905800e5ffea2b6e1c7f3e47 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 5 Nov 2022 10:26:36 -0400 Subject: [PATCH 428/632] gc.collect() in order to ensure GeneratorExit raised on pypy Fixes: #8762 Change-Id: Ibf656748ae6a54282ba58e91215e0f7a279b171a (cherry picked from commit 9febc1a53e4dbb363b1ead65bbe47e0c4f948bae) --- test/orm/test_query.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/orm/test_query.py b/test/orm/test_query.py index a7fbf069ecf..a249aa34bce 100644 --- a/test/orm/test_query.py +++ b/test/orm/test_query.py @@ -81,6 +81,7 @@ from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.schema import Column from sqlalchemy.testing.schema import Table +from sqlalchemy.testing.util import gc_collect from sqlalchemy.types import NullType from sqlalchemy.types import TypeDecorator from sqlalchemy.util import collections_abc @@ -5439,6 +5440,7 @@ def _iter(self): if i > 1: raise Exception("hi") + gc_collect() # needed for pypy, #8762 assert asserted_result[0]._soft_closed assert not asserted_result[0].closed @@ -5460,6 +5462,7 @@ def test_yield_per_close_on_interrupted_iteration(self): if i > 1: raise Exception("hi") + gc_collect() # not apparently needed, but defensive for pypy re: #8762 assert not result._soft_closed assert not result.closed result.close() From 8f653e40496778294375abd7685e646d7e48a8db Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 5 Nov 2022 16:07:49 -0400 Subject: [PATCH 429/632] disable timing intensive on py27 this includes disabling a very not important test for automap that's failing on github Change-Id: 
Ib91be649ecb07e620060a38a206317dcd6a81899 --- test/ext/test_automap.py | 1 + tox.ini | 11 ++++++++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/test/ext/test_automap.py b/test/ext/test_automap.py index eaafa347785..9227405e91d 100644 --- a/test/ext/test_automap.py +++ b/test/ext/test_automap.py @@ -501,6 +501,7 @@ def _chaos(self): finally: e.dispose() + @testing.requires.timing_intensive def test_concurrent_automaps_w_configure(self): self._success = True threads = [threading.Thread(target=self._chaos) for i in range(30)] diff --git a/tox.ini b/tox.ini index 2000351716b..50bed6247e4 100644 --- a/tox.ini +++ b/tox.ini @@ -100,6 +100,11 @@ setenv= mysql: MYSQL={env:TOX_MYSQL:--db mysql} py2{,7}-mysql: MYSQL={env:TOX_MYSQL_PY2K:{env:TOX_MYSQL:--db mysql}} + + PY_SPECIFIC= + py3{,5,6,7,8,9}: PY_SPECIFIC=--exclude-tag memory-intensive --exclude-tag timing-intensive + py2{,7}: PY_SPECIFIC=--exclude-tag memory-intensive --exclude-tag timing-intensive + mysql: EXTRA_MYSQL_DRIVERS={env:EXTRA_MYSQL_DRIVERS:--dbdriver mysqldb --dbdriver pymysql} py3{,7,8,9,10,11}-mysql: EXTRA_MYSQL_DRIVERS={env:EXTRA_MYSQL_DRIVERS:--dbdriver mysqldb --dbdriver pymysql --dbdriver mariadbconnector --dbdriver asyncmy} @@ -119,7 +124,7 @@ passenv=ORACLE_HOME NLS_LANG TOX_POSTGRESQL TOX_POSTGRESQL_PY2K TOX_MYSQL TOX_MY commands= cext: /bin/true nocext: sh -c "rm -f lib/sqlalchemy/*.so" - {env:BASECOMMAND} {env:WORKERS} {env:SQLITE:} {env:EXTRA_SQLITE_DRIVERS:} {env:POSTGRESQL:} {env:EXTRA_PG_DRIVERS:} {env:MYSQL:} {env:EXTRA_MYSQL_DRIVERS:} {env:ORACLE:} {env:MSSQL:} {env:BACKENDONLY:} {env:IDENTS:} {env:MEMUSAGE:} {env:COVERAGE:} {posargs} + {env:BASECOMMAND} {env:PY_SPECIFIC} {env:WORKERS} {env:SQLITE:} {env:EXTRA_SQLITE_DRIVERS:} {env:POSTGRESQL:} {env:EXTRA_PG_DRIVERS:} {env:MYSQL:} {env:EXTRA_MYSQL_DRIVERS:} {env:ORACLE:} {env:MSSQL:} {env:BACKENDONLY:} {env:IDENTS:} {env:MEMUSAGE:} {env:COVERAGE:} {posargs} oracle,mssql,sqlite_file: python reap_dbs.py db_idents.txt @@ 
-160,7 +165,7 @@ commands = deps = {[testenv]deps} .[aiosqlite] commands= - python -m pytest {env:WORKERS} {env:SQLITE:} {env:POSTGRESQL:} {env:MYSQL:} {env:ORACLE:} {env:MSSQL:} {env:BACKENDONLY:} {env:IDENTS:} {env:MEMUSAGE:} {env:COVERAGE:} {posargs} + python -m pytest {env:PY_SPECIFIC} {env:WORKERS} {env:SQLITE:} {env:POSTGRESQL:} {env:MYSQL:} {env:ORACLE:} {env:MSSQL:} {env:BACKENDONLY:} {env:IDENTS:} {env:MEMUSAGE:} {env:COVERAGE:} {posargs} oracle,mssql,sqlite_file: python reap_dbs.py db_idents.txt # command run in the github action when cext are not active. @@ -168,5 +173,5 @@ commands= deps = {[testenv]deps} .[aiosqlite] commands= - python -m pytest {env:WORKERS} {env:SQLITE:} {env:POSTGRESQL:} {env:MYSQL:} {env:ORACLE:} {env:MSSQL:} {env:BACKENDONLY:} {env:IDENTS:} {env:MEMUSAGE:} {env:COVERAGE:} {posargs} + python -m pytest {env:PY_SPECIFIC} {env:WORKERS} {env:SQLITE:} {env:POSTGRESQL:} {env:MYSQL:} {env:ORACLE:} {env:MSSQL:} {env:BACKENDONLY:} {env:IDENTS:} {env:MEMUSAGE:} {env:COVERAGE:} {posargs} oracle,mssql,sqlite_file: python reap_dbs.py db_idents.txt From 1175768618d3a84578b90befe73240b417ebeb78 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 11 Nov 2022 11:21:55 -0500 Subject: [PATCH 430/632] block all maridb-connector testing while we wait for this project to fix its install, ensure CI isn't running it. 
Change-Id: Iffc2efa38078a07c6ffc3417246b5b898ecbd53d --- tox.ini | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tox.ini b/tox.ini index 50bed6247e4..275cb351174 100644 --- a/tox.ini +++ b/tox.ini @@ -31,7 +31,7 @@ deps= mysql: .[mysql] mysql: .[pymysql] mysql: .[asyncmy]; python_version >= '3' - mysql: .[mariadb_connector]; python_version >= '3' + # mysql: .[mariadb_connector]; python_version >= '3' oracle: .[oracle] @@ -46,7 +46,7 @@ deps= dbapimain-mysql: git+https://github.com/PyMySQL/mysqlclient-python.git#egg=mysqlclient dbapimain-mysql: git+https://github.com/PyMySQL/PyMySQL.git#egg=pymysql - dbapimain-mysql: git+https://github.com/mariadb-corporation/mariadb-connector-python#egg=mariadb + # dbapimain-mysql: git+https://github.com/mariadb-corporation/mariadb-connector-python#egg=mariadb dbapimain-oracle: git+https://github.com/oracle/python-cx_Oracle.git#egg=cx_Oracle @@ -107,7 +107,8 @@ setenv= mysql: EXTRA_MYSQL_DRIVERS={env:EXTRA_MYSQL_DRIVERS:--dbdriver mysqldb --dbdriver pymysql} - py3{,7,8,9,10,11}-mysql: EXTRA_MYSQL_DRIVERS={env:EXTRA_MYSQL_DRIVERS:--dbdriver mysqldb --dbdriver pymysql --dbdriver mariadbconnector --dbdriver asyncmy} + # py3{,7,8,9,10,11}-mysql: EXTRA_MYSQL_DRIVERS={env:EXTRA_MYSQL_DRIVERS:--dbdriver mysqldb --dbdriver pymysql --dbdriver mariadbconnector --dbdriver asyncmy} + py3{,7,8,9,10,11}-mysql: EXTRA_MYSQL_DRIVERS={env:EXTRA_MYSQL_DRIVERS:--dbdriver mysqldb --dbdriver pymysql --dbdriver asyncmy} mssql: MSSQL={env:TOX_MSSQL:--db mssql} From c4f3a72cf5660d591eaac77c4d412d933689aeed Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 11 Nov 2022 09:46:06 -0500 Subject: [PATCH 431/632] repair --disable-asyncio parameter Fixed issue where the ``--disable-asyncio`` parameter to the test suite would fail to not actually run greenlet tests and would also not prevent the suite from using a "wrapping" greenlet for the whole suite. 
This parameter now ensures that no greenlet or asyncio use will occur within the entire run when set. Fixes: #8793 Change-Id: I87b510846b2cc24413cd54e7b7136e91aad3c309 (cherry picked from commit 7f7e961f46aeff5895bd34fec9e2e208862d7a6b) --- doc/build/changelog/unreleased_14/8793.rst | 9 +++++++++ lib/sqlalchemy/testing/plugin/plugin_base.py | 2 +- lib/sqlalchemy/testing/requirements.py | 4 ++++ 3 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/8793.rst diff --git a/doc/build/changelog/unreleased_14/8793.rst b/doc/build/changelog/unreleased_14/8793.rst new file mode 100644 index 00000000000..36f1003ccce --- /dev/null +++ b/doc/build/changelog/unreleased_14/8793.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, tests + :tickets: 8793 + + Fixed issue where the ``--disable-asyncio`` parameter to the test suite + would fail to not actually run greenlet tests and would also not prevent + the suite from using a "wrapping" greenlet for the whole suite. This + parameter now ensures that no greenlet or asyncio use will occur within the + entire run when set. diff --git a/lib/sqlalchemy/testing/plugin/plugin_base.py b/lib/sqlalchemy/testing/plugin/plugin_base.py index d59564e8e01..c941332f311 100644 --- a/lib/sqlalchemy/testing/plugin/plugin_base.py +++ b/lib/sqlalchemy/testing/plugin/plugin_base.py @@ -391,7 +391,7 @@ def _init_symbols(options, file_config): config._fixture_functions = _fixture_fn_class() -@post +@pre def _set_disable_asyncio(opt, file_config): if opt.disable_asyncio or not py3k: from sqlalchemy.testing import asyncio diff --git a/lib/sqlalchemy/testing/requirements.py b/lib/sqlalchemy/testing/requirements.py index 7e8a030e322..069ff747dbb 100644 --- a/lib/sqlalchemy/testing/requirements.py +++ b/lib/sqlalchemy/testing/requirements.py @@ -18,6 +18,7 @@ import platform import sys +from . import asyncio as _test_asyncio from . import exclusions from . import only_on from .. 
import util @@ -1393,6 +1394,9 @@ def asyncio(self): @property def greenlet(self): def go(config): + if not _test_asyncio.ENABLE_ASYNCIO: + return False + try: import greenlet # noqa: F401 except ImportError: From f44e233dccd7739a9f73a2ea16ce6b5b8d7d0f5f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 7 Nov 2022 18:40:03 -0500 Subject: [PATCH 432/632] establish consistency for RETURNING column labels For the PostgreSQL and SQL Server dialects only, adjusted the compiler so that when rendering column expressions in the RETURNING clause, the "non anon" label that's used in SELECT statements is suggested for SQL expression elements that generate a label; the primary example is a SQL function that may be emitting as part of the column's type, where the label name should match the column's name by default. This restores a not-well defined behavior that had changed in version 1.4.21 due to :ticket:`6718`, :ticket:`6710`. The Oracle dialect has a different RETURNING implementation and was not affected by this issue. Version 2.0 features an across the board change for its widely expanded support of RETURNING on other backends. Fixed issue in the Oracle dialect where an INSERT statement that used ``insert(some_table).values(...).returning(some_table)`` against a full :class:`.Table` object at once would fail to execute, raising an exception. 
Fixes: #8770 Change-Id: I2ab078a214a778ffe1720dbd864ae4c105a0691d (cherry picked from commit c8a7b67181d31634355150fc0379ec0e780ff728) --- doc/build/changelog/unreleased_14/8770.rst | 23 +++++ lib/sqlalchemy/dialects/mssql/base.py | 1 + lib/sqlalchemy/dialects/oracle/cx_oracle.py | 6 +- lib/sqlalchemy/dialects/postgresql/base.py | 4 +- lib/sqlalchemy/sql/compiler.py | 10 +- lib/sqlalchemy/sql/selectable.py | 6 +- test/dialect/mssql/test_compiler.py | 29 ++++++ test/dialect/oracle/test_compiler.py | 31 ++++++ test/dialect/postgresql/test_compiler.py | 30 ++++++ test/sql/test_labels.py | 101 +++++++++++++++++++- test/sql/test_returning.py | 44 +++++++++ 11 files changed, 276 insertions(+), 9 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8770.rst diff --git a/doc/build/changelog/unreleased_14/8770.rst b/doc/build/changelog/unreleased_14/8770.rst new file mode 100644 index 00000000000..8968b0361ee --- /dev/null +++ b/doc/build/changelog/unreleased_14/8770.rst @@ -0,0 +1,23 @@ +.. change:: + :tags: bug, postgresql, mssql + :tickets: 8770 + + For the PostgreSQL and SQL Server dialects only, adjusted the compiler so + that when rendering column expressions in the RETURNING clause, the "non + anon" label that's used in SELECT statements is suggested for SQL + expression elements that generate a label; the primary example is a SQL + function that may be emitting as part of the column's type, where the label + name should match the column's name by default. This restores a not-well + defined behavior that had changed in version 1.4.21 due to :ticket:`6718`, + :ticket:`6710`. The Oracle dialect has a different RETURNING implementation + and was not affected by this issue. Version 2.0 features an across the + board change for its widely expanded support of RETURNING on other + backends. + + +.. 
change:: + :tags: bug, oracle + + Fixed issue in the Oracle dialect where an INSERT statement that used + ``insert(some_table).values(...).returning(some_table)`` against a full + :class:`.Table` object at once would fail to execute, raising an exception. diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index 0509413062f..ea9c90a51ec 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -2154,6 +2154,7 @@ def returning_clause(self, stmt, returning_cols): stmt, adapter.traverse(c), {"result_map_targets": (c,)}, + fallback_label_name=c._non_anon_label, ) for c in expression._select_iterables(returning_cols) ] diff --git a/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/lib/sqlalchemy/dialects/oracle/cx_oracle.py index 90dabc83b93..fe18d1310b0 100644 --- a/lib/sqlalchemy/dialects/oracle/cx_oracle.py +++ b/lib/sqlalchemy/dialects/oracle/cx_oracle.py @@ -468,6 +468,7 @@ def _remove_clob(inputsizes, cursor, statement, parameters, context): from ... import types as sqltypes from ... 
import util from ...engine import cursor as _cursor +from ...sql import expression from ...util import compat @@ -887,11 +888,12 @@ def post_exec(self): self.cursor, [ (getattr(col, "name", col._anon_name_label), None) - for col in self.compiled.returning + for col in expression._select_iterables( + self.compiled.returning + ) ], initial_buffer=[tuple(returning_params)], ) - self.cursor_fetch_strategy = fetch_strategy def create_cursor(self): diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index c94c7732545..b980183d007 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -2515,7 +2515,9 @@ def for_update_clause(self, select, **kw): def returning_clause(self, stmt, returning_cols): columns = [ - self._label_returning_column(stmt, c) + self._label_returning_column( + stmt, c, fallback_label_name=c._non_anon_label + ) for c in expression._select_iterables(returning_cols) ] diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 0e441fbec8e..a7232f096d6 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -3047,7 +3047,9 @@ def _add_to_result_map(self, keyname, name, objects, type_): ) self._result_columns.append((keyname, name, objects, type_)) - def _label_returning_column(self, stmt, column, column_clause_args=None): + def _label_returning_column( + self, stmt, column, column_clause_args=None, **kw + ): """Render a column with necessary labels inside of a RETURNING clause. 
This method is provided for individual dialects in place of calling @@ -3063,6 +3065,7 @@ def _label_returning_column(self, stmt, column, column_clause_args=None): True, False, {} if column_clause_args is None else column_clause_args, + **kw ) def _label_select_column( @@ -3127,7 +3130,6 @@ def add_to_result_map(keyname, name, objects, type_): "_label_select_column is only relevant within " "the columns clause of a SELECT or RETURNING" ) - if isinstance(column, elements.Label): if col_expr is not column: result_expr = _CompileLabel( @@ -4319,7 +4321,9 @@ def visit_sequence(self, seq, **kw): def returning_clause(self, stmt, returning_cols): columns = [ - self._label_select_column(None, c, True, False, {}) + self._label_select_column( + None, c, True, False, {}, fallback_label_name=c._non_anon_label + ) for c in base._select_iterables(returning_cols) ] diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index 95e13f0810d..956f8ae8d8a 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -6191,7 +6191,7 @@ def _ensure_disambiguated_names(self): self = self.set_label_style(LABEL_STYLE_DISAMBIGUATE_ONLY) return self - def _generate_columns_plus_names(self, anon_for_dupe_key): + def _generate_columns_plus_names(self, anon_for_dupe_key, cols=None): """Generate column names as rendered in a SELECT statement by the compiler. @@ -6201,7 +6201,9 @@ def _generate_columns_plus_names(self, anon_for_dupe_key): _column_naming_convention as well. 
""" - cols = self._all_selected_columns + + if cols is None: + cols = self._all_selected_columns key_naming_convention = SelectState._column_naming_convention( self._label_style diff --git a/test/dialect/mssql/test_compiler.py b/test/dialect/mssql/test_compiler.py index bad5e4e10b6..d54295b3062 100644 --- a/test/dialect/mssql/test_compiler.py +++ b/test/dialect/mssql/test_compiler.py @@ -37,6 +37,7 @@ from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing.assertions import eq_ignore_whitespace +from sqlalchemy.types import TypeEngine tbl = table("t", column("a")) @@ -104,6 +105,34 @@ def test_select_w_order_by_collate(self): "Latin1_General_CS_AS_KS_WS_CI ASC", ) + @testing.fixture + def column_expression_fixture(self): + class MyString(TypeEngine): + def column_expression(self, column): + return func.lower(column) + + return table( + "some_table", column("name", String), column("value", MyString) + ) + + @testing.combinations("columns", "table", argnames="use_columns") + def test_plain_returning_column_expression( + self, column_expression_fixture, use_columns + ): + """test #8770""" + table1 = column_expression_fixture + + if use_columns == "columns": + stmt = insert(table1).returning(table1) + else: + stmt = insert(table1).returning(table1.c.name, table1.c.value) + + self.assert_compile( + stmt, + "INSERT INTO some_table (name, value) OUTPUT inserted.name, " + "lower(inserted.value) AS value VALUES (:name, :value)", + ) + def test_join_with_hint(self): t1 = table( "t1", diff --git a/test/dialect/oracle/test_compiler.py b/test/dialect/oracle/test_compiler.py index 22ffc888ab0..8a8f51df012 100644 --- a/test/dialect/oracle/test_compiler.py +++ b/test/dialect/oracle/test_compiler.py @@ -8,6 +8,7 @@ from sqlalchemy import func from sqlalchemy import Identity from sqlalchemy import Index +from sqlalchemy import insert from sqlalchemy import Integer from sqlalchemy import literal from sqlalchemy import literal_column @@ -39,6 
+40,7 @@ from sqlalchemy.testing.assertions import eq_ignore_whitespace from sqlalchemy.testing.schema import Column from sqlalchemy.testing.schema import Table +from sqlalchemy.types import TypeEngine class CompileTest(fixtures.TestBase, AssertsCompiledSQL): @@ -1150,6 +1152,35 @@ def test_returning_insert_labeled(self): "t1.c2, t1.c3 INTO :ret_0, :ret_1", ) + @testing.fixture + def column_expression_fixture(self): + class MyString(TypeEngine): + def column_expression(self, column): + return func.lower(column) + + return table( + "some_table", column("name", String), column("value", MyString) + ) + + @testing.combinations("columns", "table", argnames="use_columns") + def test_plain_returning_column_expression( + self, column_expression_fixture, use_columns + ): + """test #8770""" + table1 = column_expression_fixture + + if use_columns == "columns": + stmt = insert(table1).returning(table1) + else: + stmt = insert(table1).returning(table1.c.name, table1.c.value) + + self.assert_compile( + stmt, + "INSERT INTO some_table (name, value) VALUES (:name, :value) " + "RETURNING some_table.name, lower(some_table.value) " + "INTO :ret_0, :ret_1", + ) + def test_returning_insert_computed(self): m = MetaData() t1 = Table( diff --git a/test/dialect/postgresql/test_compiler.py b/test/dialect/postgresql/test_compiler.py index 897909b158b..0249c7952ce 100644 --- a/test/dialect/postgresql/test_compiler.py +++ b/test/dialect/postgresql/test_compiler.py @@ -59,6 +59,7 @@ from sqlalchemy.testing.assertions import AssertsCompiledSQL from sqlalchemy.testing.assertions import expect_warnings from sqlalchemy.testing.assertions import is_ +from sqlalchemy.types import TypeEngine from sqlalchemy.util import OrderedDict from sqlalchemy.util import u @@ -205,6 +206,35 @@ def test_insert_returning(self): dialect=dialect, ) + @testing.fixture + def column_expression_fixture(self): + class MyString(TypeEngine): + def column_expression(self, column): + return func.lower(column) + + return table( 
+ "some_table", column("name", String), column("value", MyString) + ) + + @testing.combinations("columns", "table", argnames="use_columns") + def test_plain_returning_column_expression( + self, column_expression_fixture, use_columns + ): + """test #8770""" + table1 = column_expression_fixture + + if use_columns == "columns": + stmt = insert(table1).returning(table1) + else: + stmt = insert(table1).returning(table1.c.name, table1.c.value) + + self.assert_compile( + stmt, + "INSERT INTO some_table (name, value) " + "VALUES (%(name)s, %(value)s) RETURNING some_table.name, " + "lower(some_table.value) AS value", + ) + def test_create_drop_enum(self): # test escaping and unicode within CREATE TYPE for ENUM typ = postgresql.ENUM( diff --git a/test/sql/test_labels.py b/test/sql/test_labels.py index a82b0372eaa..869134f9c0c 100644 --- a/test/sql/test_labels.py +++ b/test/sql/test_labels.py @@ -2,6 +2,8 @@ from sqlalchemy import Boolean from sqlalchemy import cast from sqlalchemy import exc as exceptions +from sqlalchemy import func +from sqlalchemy import insert from sqlalchemy import Integer from sqlalchemy import literal_column from sqlalchemy import MetaData @@ -32,6 +34,7 @@ from sqlalchemy.testing import mock from sqlalchemy.testing.schema import Column from sqlalchemy.testing.schema import Table +from sqlalchemy.types import TypeEngine IDENT_LENGTH = 29 @@ -802,7 +805,7 @@ class ColExprLabelTest(fixtures.TestBase, AssertsCompiledSQL): """ - __dialect__ = "default" + __dialect__ = "default_enhanced" table1 = table("some_table", column("name"), column("value")) @@ -827,6 +830,101 @@ def process(element, compiler, **kw): return SomeColThing + @testing.fixture + def compiler_column_fixture(self): + return self._fixture() + + @testing.fixture + def column_expression_fixture(self): + class MyString(TypeEngine): + def column_expression(self, column): + return func.lower(column) + + return table( + "some_table", column("name", String), column("value", MyString) + ) + + def 
test_plain_select_compiler_expression(self, compiler_column_fixture): + expr = compiler_column_fixture + table1 = self.table1 + + self.assert_compile( + select( + table1.c.name, + expr(table1.c.value), + ), + "SELECT some_table.name, SOME_COL_THING(some_table.value) " + "AS value FROM some_table", + ) + + def test_plain_select_column_expression(self, column_expression_fixture): + table1 = column_expression_fixture + + self.assert_compile( + select(table1), + "SELECT some_table.name, lower(some_table.value) AS value " + "FROM some_table", + ) + + def test_plain_returning_compiler_expression( + self, compiler_column_fixture + ): + expr = compiler_column_fixture + table1 = self.table1 + + self.assert_compile( + insert(table1).returning( + table1.c.name, + expr(table1.c.value), + ), + "INSERT INTO some_table (name, value) VALUES (:name, :value) " + "RETURNING some_table.name, " + "SOME_COL_THING(some_table.value) AS value", + ) + + @testing.combinations("columns", "table", argnames="use_columns") + def test_plain_returning_column_expression( + self, column_expression_fixture, use_columns + ): + table1 = column_expression_fixture + + if use_columns == "columns": + stmt = insert(table1).returning(table1) + else: + stmt = insert(table1).returning(table1.c.name, table1.c.value) + + self.assert_compile( + stmt, + "INSERT INTO some_table (name, value) VALUES (:name, :value) " + "RETURNING some_table.name, lower(some_table.value) AS value", + ) + + def test_select_dupes_column_expression(self, column_expression_fixture): + table1 = column_expression_fixture + + self.assert_compile( + select(table1.c.name, table1.c.value, table1.c.value), + "SELECT some_table.name, lower(some_table.value) AS value, " + "lower(some_table.value) AS value__1 FROM some_table", + ) + + def test_returning_dupes_column_expression( + self, column_expression_fixture + ): + table1 = column_expression_fixture + + stmt = insert(table1).returning( + table1.c.name, table1.c.value, table1.c.value + ) + + # 
1.4 behavior only; limited support for labels in RETURNING + self.assert_compile( + stmt, + "INSERT INTO some_table (name, value) VALUES (:name, :value) " + "RETURNING some_table.name, lower(some_table.value) AS value, " + "lower(some_table.value) AS value", + ) + def test_column_auto_label_dupes_label_style_none(self): expr = self._fixture() table1 = self.table1 @@ -991,6 +1089,7 @@ def test_boolean_auto_label(self): # not sure if this SQL is right but this is what it was # before the new labeling, just different label name "SELECT value = 0 AS value, value", + dialect="default", ) def test_label_auto_label_use_labels(self): diff --git a/test/sql/test_returning.py b/test/sql/test_returning.py index 10bf3beb6fe..2db9b8bc9de 100644 --- a/test/sql/test_returning.py +++ b/test/sql/test_returning.py @@ -350,6 +350,50 @@ def test_no_ipk_on_returning(self, connection): "inserted_primary_key", ) + @testing.fixture + def column_expression_fixture(self, metadata, connection): + class MyString(TypeDecorator): + cache_ok = True + impl = String(50) + + def column_expression(self, column): + return func.lower(column) + + t1 = Table( + "some_table", + metadata, + Column("name", String(50)), + Column("value", MyString(50)), + ) + metadata.create_all(connection) + return t1 + + @testing.combinations("columns", "table", argnames="use_columns") + def test_plain_returning_column_expression( + self, column_expression_fixture, use_columns, connection + ): + """test #8770""" + table1 = column_expression_fixture + + if use_columns == "columns": + stmt = ( + insert(table1) + .values(name="n1", value="ValUE1") + .returning(table1) + ) + else: + stmt = ( + insert(table1) + .values(name="n1", value="ValUE1") + .returning(table1.c.name, table1.c.value) + ) + + result = connection.execute(stmt) + row = result.first() + + eq_(row._mapping["name"], "n1") + eq_(row._mapping["value"], "value1") + @testing.fails_on_everything_except("postgresql", "firebird") def test_literal_returning(self, 
connection): if testing.against("postgresql"): From 11cd18ce03d0b716ce05ee8297ffa29ae671cd21 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 10 Nov 2022 17:01:58 -0500 Subject: [PATCH 433/632] ensure anon_map is passed for most annotated traversals We can cache the annotated cache key for Table, but for selectables it's not safe, as it fails to pass the anon_map along and creates many redudant structures in observed test scenario. It is likely safe for a Column that's mapped to a Table also, however this is not implemented here. Will have to see if that part needs adjusting. Fixed critical memory issue identified in cache key generation, where for very large and complex ORM statements that make use of lots of ORM aliases with subqueries, cache key generation could produce excessively large keys that were orders of magnitude bigger than the statement itself. Much thanks to Rollo Konig Brock for their very patient, long term help in finally identifying this issue. Also within TypeEngine objects, when we generate elements for instance variables, skip the None elements at least. this also saves on tuple complexity. 
Fixes: #8790 Change-Id: I448ddbfb45ae0a648815be8dad4faad7d1977427 (cherry picked from commit 88c240d907a9ae3b5caf766009edd196a30cece3) --- doc/build/changelog/unreleased_14/8790.rst | 10 +++ lib/sqlalchemy/sql/annotation.py | 6 +- lib/sqlalchemy/sql/elements.py | 29 +++++++- lib/sqlalchemy/sql/schema.py | 13 ++++ lib/sqlalchemy/sql/traversals.py | 16 ++-- lib/sqlalchemy/sql/type_api.py | 4 +- lib/sqlalchemy/testing/__init__.py | 2 + lib/sqlalchemy/testing/assertions.py | 11 +++ lib/sqlalchemy/testing/util.py | 63 ++++++++++++++++ lib/sqlalchemy/util/__init__.py | 2 + lib/sqlalchemy/util/compat.py | 2 + test/orm/test_cache_key.py | 85 ++++++++++++++++++++++ 12 files changed, 232 insertions(+), 11 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8790.rst diff --git a/doc/build/changelog/unreleased_14/8790.rst b/doc/build/changelog/unreleased_14/8790.rst new file mode 100644 index 00000000000..a3214801c8e --- /dev/null +++ b/doc/build/changelog/unreleased_14/8790.rst @@ -0,0 +1,10 @@ +.. change:: + :tags: bug, sql + :tickets: 8790 + + Fixed critical memory issue identified in cache key generation, where for + very large and complex ORM statements that make use of lots of ORM aliases + with subqueries, cache key generation could produce excessively large keys + that were orders of magnitude bigger than the statement itself. Much thanks + to Rollo Konig Brock for their very patient, long term help in finally + identifying this issue. 
diff --git a/lib/sqlalchemy/sql/annotation.py b/lib/sqlalchemy/sql/annotation.py index 5c000ed6c3f..01b5a53a6e3 100644 --- a/lib/sqlalchemy/sql/annotation.py +++ b/lib/sqlalchemy/sql/annotation.py @@ -26,12 +26,16 @@ class SupportsAnnotations(object): @util.memoized_property def _annotations_cache_key(self): anon_map_ = anon_map() + + return self._gen_annotations_cache_key(anon_map_) + + def _gen_annotations_cache_key(self, anon_map): return ( "_annotations", tuple( ( key, - value._gen_cache_key(anon_map_, []) + value._gen_cache_key(anon_map, []) if isinstance(value, HasCacheKey) else value, ) diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index eb5bc5a0087..72486e749ab 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -203,7 +203,8 @@ class ClauseElement( is_clause_element = True is_selectable = False - + _gen_static_annotations_cache_key = False + _is_table = False _is_textual = False _is_from_clause = False _is_returns_rows = False @@ -3079,7 +3080,7 @@ class Cast(WrapsColumnExpression, ColumnElement): _traverse_internals = [ ("clause", InternalTraversal.dp_clauseelement), - ("typeclause", InternalTraversal.dp_clauseelement), + ("type", InternalTraversal.dp_type), ] def __init__(self, expression, type_): @@ -3880,7 +3881,20 @@ class BinaryExpression(ColumnElement): ( "type", InternalTraversal.dp_type, - ), # affects JSON CAST operators + ), + ] + + _cache_key_traversal = [ + ("left", InternalTraversal.dp_clauseelement), + ("right", InternalTraversal.dp_clauseelement), + ("operator", InternalTraversal.dp_operator), + ("modifiers", InternalTraversal.dp_plain_dict), + # "type" affects JSON CAST operators, so while redundant in most cases, + # is needed for that one + ( + "type", + InternalTraversal.dp_type, + ), ] _is_implicitly_boolean = True @@ -4016,6 +4030,10 @@ class Grouping(GroupedElement, ColumnElement): ("type", InternalTraversal.dp_type), ] + _cache_key_traversal = [ + ("element", 
InternalTraversal.dp_clauseelement), + ] + def __init__(self, element): self.element = element self.type = getattr(element, "type", type_api.NULLTYPE) @@ -4516,6 +4534,11 @@ class Label(roles.LabeledColumnExprRole, ColumnElement): ("_element", InternalTraversal.dp_clauseelement), ] + _cache_key_traversal = [ + ("name", InternalTraversal.dp_anon_name), + ("_element", InternalTraversal.dp_clauseelement), + ] + def __init__(self, name, element, type_=None): """Return a :class:`Label` object for the given :class:`_expression.ColumnElement`. diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py index dde665cbde7..8198a829839 100644 --- a/lib/sqlalchemy/sql/schema.py +++ b/lib/sqlalchemy/sql/schema.py @@ -544,6 +544,8 @@ def listen_for_reflect(table, column_info): ("schema", InternalTraversal.dp_string) ] + _is_table = True + def _gen_cache_key(self, anon_map, bindparams): if self._annotations: return (self,) + self._annotations_cache_key @@ -1810,6 +1812,17 @@ def __init__(self, *args, **kwargs): """ + @util.memoized_property + def _gen_static_annotations_cache_key(self): + """special attribute used by cache key gen, if true, we will + use a static cache key for the annotations dictionary, else we + will generate a new cache key for annotations each time. + + Added for #8790 + + """ + return self.table is not None and self.table._is_table + def _extra_kwargs(self, **kwargs): self._validate_dialect_kwargs(kwargs) diff --git a/lib/sqlalchemy/sql/traversals.py b/lib/sqlalchemy/sql/traversals.py index 9da61ab28cb..21aa17a0a64 100644 --- a/lib/sqlalchemy/sql/traversals.py +++ b/lib/sqlalchemy/sql/traversals.py @@ -246,12 +246,16 @@ def _gen_cache_key(self, anon_map, bindparams): else None, ) elif meth is InternalTraversal.dp_annotations_key: - # obj is here is the _annotations dict. however, we - # want to use the memoized cache key version of it. for - # Columns, this should be long lived. 
For select() - # statements, not so much, but they usually won't have - # annotations. - result += self._annotations_cache_key + # obj is here is the _annotations dict. Table uses + # a memoized version of it. however in other cases, + # we generate it given anon_map as we may be from a + # Join, Aliased, etc. + # see #8790 + + if self._gen_static_annotations_cache_key: # type: ignore # noqa: E501 + result += self._annotations_cache_key # type: ignore # noqa: E501 + else: + result += self._gen_annotations_cache_key(anon_map) # type: ignore # noqa: E501 elif ( meth is InternalTraversal.dp_clauseelement_list or meth is InternalTraversal.dp_clauseelement_tuple diff --git a/lib/sqlalchemy/sql/type_api.py b/lib/sqlalchemy/sql/type_api.py index 29dc74971c8..30fc4189bba 100644 --- a/lib/sqlalchemy/sql/type_api.py +++ b/lib/sqlalchemy/sql/type_api.py @@ -745,7 +745,9 @@ def _static_cache_key(self): else self.__dict__[k], ) for k in names - if k in self.__dict__ and not k.startswith("_") + if k in self.__dict__ + and not k.startswith("_") + and self.__dict__[k] is not None ) def adapt(self, cls, **kw): diff --git a/lib/sqlalchemy/testing/__init__.py b/lib/sqlalchemy/testing/__init__.py index 80d344faf1e..73b43f04bd4 100644 --- a/lib/sqlalchemy/testing/__init__.py +++ b/lib/sqlalchemy/testing/__init__.py @@ -28,6 +28,7 @@ from .assertions import expect_raises_message from .assertions import expect_warnings from .assertions import in_ +from .assertions import int_within_variance from .assertions import is_ from .assertions import is_false from .assertions import is_instance_of @@ -48,6 +49,7 @@ from .config import db from .config import fixture from .config import requirements as requires +from .config import skip_test from .exclusions import _is_excluded from .exclusions import _server_version from .exclusions import against as _against diff --git a/lib/sqlalchemy/testing/assertions.py b/lib/sqlalchemy/testing/assertions.py index 9a3c06b0290..ba6ee14c3b5 100644 --- 
a/lib/sqlalchemy/testing/assertions.py +++ b/lib/sqlalchemy/testing/assertions.py @@ -243,6 +243,17 @@ def _assert_no_stray_pool_connections(): engines.testing_reaper.assert_all_closed() +def int_within_variance(expected, received, variance): + deviance = int(expected * variance) + assert ( + abs(received - expected) < deviance + ), "Given int value %s is not within %d%% of expected value %s" % ( + received, + variance * 100, + expected, + ) + + def eq_regex(a, b, msg=None): assert re.match(b, a), msg or "%r !~ %r" % (a, b) diff --git a/lib/sqlalchemy/testing/util.py b/lib/sqlalchemy/testing/util.py index be89bc6e448..9baf1014b0e 100644 --- a/lib/sqlalchemy/testing/util.py +++ b/lib/sqlalchemy/testing/util.py @@ -5,10 +5,13 @@ # This module is part of SQLAlchemy and is released under # the MIT License: https://www.opensource.org/licenses/mit-license.php +from collections import deque import decimal import gc +from itertools import chain import random import sys +from sys import getsizeof import types from . import config @@ -456,3 +459,63 @@ def decorate(fn, *arg, **kw): event_cls._clear() return decorate + + +def total_size(o): + """Returns the approximate memory footprint an object and all of its + contents. 
+ + source: https://code.activestate.com/recipes/577504/ + + + """ + + def dict_handler(d): + return chain.from_iterable(d.items()) + + all_handlers = { + tuple: iter, + list: iter, + deque: iter, + dict: dict_handler, + set: iter, + frozenset: iter, + } + seen = set() # track which object id's have already been seen + default_size = getsizeof(0) # estimate sizeof object without __sizeof__ + + def sizeof(o): + if id(o) in seen: # do not double count the same object + return 0 + seen.add(id(o)) + s = getsizeof(o, default_size) + + for typ, handler in all_handlers.items(): + if isinstance(o, typ): + s += sum(map(sizeof, handler(o))) + break + return s + + return sizeof(o) + + +def count_cache_key_tuples(tup): + """given a cache key tuple, counts how many instances of actual + tuples are found. + + used to alert large jumps in cache key complexity. + + """ + stack = [tup] + + sentinel = object() + num_elements = 0 + + while stack: + elem = stack.pop(0) + if elem is sentinel: + num_elements += 1 + elif isinstance(elem, tuple): + if elem: + stack = list(elem) + [sentinel] + stack + return num_elements diff --git a/lib/sqlalchemy/util/__init__.py b/lib/sqlalchemy/util/__init__.py index 33427e3b504..d6ce6490344 100644 --- a/lib/sqlalchemy/util/__init__.py +++ b/lib/sqlalchemy/util/__init__.py @@ -77,7 +77,9 @@ from .compat import pickle from .compat import print_ from .compat import py2k +from .compat import py310 from .compat import py311 +from .compat import py312 from .compat import py37 from .compat import py38 from .compat import py39 diff --git a/lib/sqlalchemy/util/compat.py b/lib/sqlalchemy/util/compat.py index 21a9491f8e6..2c2a1a77ae8 100644 --- a/lib/sqlalchemy/util/compat.py +++ b/lib/sqlalchemy/util/compat.py @@ -14,7 +14,9 @@ import platform import sys +py312 = sys.version_info >= (3, 12) py311 = sys.version_info >= (3, 11) +py310 = sys.version_info >= (3, 10) py39 = sys.version_info >= (3, 9) py38 = sys.version_info >= (3, 8) py37 = sys.version_info >= (3, 
7) diff --git a/test/orm/test_cache_key.py b/test/orm/test_cache_key.py index 23fec61d2a0..169df909ec5 100644 --- a/test/orm/test_cache_key.py +++ b/test/orm/test_cache_key.py @@ -8,11 +8,14 @@ from sqlalchemy import literal_column from sqlalchemy import null from sqlalchemy import select +from sqlalchemy import String from sqlalchemy import Table from sqlalchemy import testing from sqlalchemy import text from sqlalchemy import true from sqlalchemy import update +from sqlalchemy import util +from sqlalchemy.ext.declarative import ConcreteBase from sqlalchemy.orm import aliased from sqlalchemy.orm import Bundle from sqlalchemy.orm import defaultload @@ -37,10 +40,15 @@ from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures +from sqlalchemy.testing import int_within_variance from sqlalchemy.testing import ne_ +from sqlalchemy.testing.fixtures import DeclarativeMappedTest from sqlalchemy.testing.fixtures import fixture_session +from sqlalchemy.testing.util import count_cache_key_tuples +from sqlalchemy.testing.util import total_size from test.orm import _fixtures from .inheritance import _poly_fixtures +from .test_events import _RemoveListeners from .test_query import QueryTest @@ -1032,3 +1040,80 @@ def test_bulk_update_cache_key(self): ) eq_(stmt._generate_cache_key(), stmt2._generate_cache_key()) + + +class EmbeddedSubqTest(_RemoveListeners, DeclarativeMappedTest): + """test #8790. + + it's expected that cache key structures will change, this test is here + testing something fairly similar to the issue we had (though vastly + smaller scale) so we mostly want to look for surprise jumps here. 
+ + """ + + @classmethod + def setup_classes(cls): + Base = cls.DeclarativeBasic + + class Employee(ConcreteBase, Base): + __tablename__ = "employee" + id = Column(Integer, primary_key=True) + name = Column(String(50)) + + __mapper_args__ = { + "polymorphic_identity": "employee", + "concrete": True, + } + + class Manager(Employee): + __tablename__ = "manager" + id = Column(Integer, primary_key=True) + name = Column(String(50)) + manager_data = Column(String(40)) + + __mapper_args__ = { + "polymorphic_identity": "manager", + "concrete": True, + } + + class Engineer(Employee): + __tablename__ = "engineer" + id = Column(Integer, primary_key=True) + name = Column(String(50)) + engineer_info = Column(String(40)) + + __mapper_args__ = { + "polymorphic_identity": "engineer", + "concrete": True, + } + + @testing.combinations("tuples", "memory", argnames="assert_on") + def test_cache_key_gen(self, assert_on): + Employee = self.classes.Employee + + e1 = aliased(Employee) + e2 = aliased(Employee) + + subq = select(e1).union_all(select(e2)).subquery() + + anno = aliased(Employee, subq) + + stmt = select(anno) + + ck = stmt._generate_cache_key() + + if assert_on == "tuples": + # before the fix for #8790 this was 700 + int_within_variance(142, count_cache_key_tuples(ck), 0.05) + + elif assert_on == "memory": + # before the fix for #8790 this was 55154 + + if util.py312: + testing.skip_test("python platform not available") + elif util.py311: + int_within_variance(39996, total_size(ck), 0.05) + elif util.py310: + int_within_variance(29796, total_size(ck), 0.05) + else: + testing.skip_test("python platform not available") From 910504b67c22bfc767e2e47e284b792653efdefc Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 11 Nov 2022 16:04:06 -0500 Subject: [PATCH 434/632] backport relevant mypy 0.990 fixes from main Changes from ebb54e80a5a52d0cce4cba1abc21c707a42c2c73 which are relevant here. 
Adjusted the test suite which tests the Mypy plugin to accommodate for changes in Mypy 0.990 regarding how it handles message output, which affect how sys.path is interpreted when determining if notes and errors should be printed for particular files. The change broke the test suite as the files within the test directory itself no longer produced messaging when run under the mypy API. Change-Id: I1728fd3bd21a4d499db0a4939ee27c67b2c34123 --- .../changelog/unreleased_14/mypy_fixes.rst | 9 +++++++++ test/ext/mypy/test_mypy_plugin_py3k.py | 17 +++++++++++++++-- 2 files changed, 24 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/mypy_fixes.rst diff --git a/doc/build/changelog/unreleased_14/mypy_fixes.rst b/doc/build/changelog/unreleased_14/mypy_fixes.rst new file mode 100644 index 00000000000..32e4f14658f --- /dev/null +++ b/doc/build/changelog/unreleased_14/mypy_fixes.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, tests + + Adjusted the test suite which tests the Mypy plugin to accommodate for + changes in Mypy 0.990 regarding how it handles message output, which affect + how sys.path is interpreted when determining if notes and errors should be + printed for particular files. The change broke the test suite as the files + within the test directory itself no longer produced messaging when run + under the mypy API. 
diff --git a/test/ext/mypy/test_mypy_plugin_py3k.py b/test/ext/mypy/test_mypy_plugin_py3k.py index 181a7958f3f..3df758c56db 100644 --- a/test/ext/mypy/test_mypy_plugin_py3k.py +++ b/test/ext/mypy/test_mypy_plugin_py3k.py @@ -63,7 +63,18 @@ def run(path, use_plugin=True, incremental=False): ), ] - args.append(path) + if incremental: + args.append(path) + else: + # mypy as of 0.990 is more aggressively blocking messaging + # for paths that are in sys.path, and as pytest puts currdir, + # test/ etc in sys.path, just copy the source file to the + # tempdir we are working in so that we don't have to try to + # manipulate sys.path and/or guess what mypy is doing + filename = os.path.basename(path) + test_program = os.path.join(cachedir, filename) + shutil.copyfile(path, test_program) + args.append(test_program) result = api.run(args) return result @@ -185,7 +196,9 @@ def test_mypy(self, mypy_runner, path): not_located = [] if expected_errors: - eq_(result[2], 1, msg=result) + # mypy 0.990 changed how return codes work, so don't assume a + # 1 or a 0 return code here, could be either depending on if + # errors were generated or not print(result[0]) From 6e338f168d4ca38278290dcf7f2c72f498c608fb Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 12 Nov 2022 08:59:44 -0500 Subject: [PATCH 435/632] - 1.4.44 --- doc/build/changelog/changelog_14.rst | 57 ++++++++++++++++++- doc/build/changelog/unreleased_14/8770.rst | 23 -------- doc/build/changelog/unreleased_14/8790.rst | 10 ---- doc/build/changelog/unreleased_14/8793.rst | 9 --- .../changelog/unreleased_14/mypy_fixes.rst | 9 --- doc/build/conf.py | 4 +- 6 files changed, 58 insertions(+), 54 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/8770.rst delete mode 100644 doc/build/changelog/unreleased_14/8790.rst delete mode 100644 doc/build/changelog/unreleased_14/8793.rst delete mode 100644 doc/build/changelog/unreleased_14/mypy_fixes.rst diff --git a/doc/build/changelog/changelog_14.rst 
b/doc/build/changelog/changelog_14.rst index 69353f9c5ab..ef0b83c3b4b 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,62 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.44 - :include_notes_from: unreleased_14 + :released: November 12, 2022 + + .. change:: + :tags: bug, sql + :tickets: 8790 + + Fixed critical memory issue identified in cache key generation, where for + very large and complex ORM statements that make use of lots of ORM aliases + with subqueries, cache key generation could produce excessively large keys + that were orders of magnitude bigger than the statement itself. Much thanks + to Rollo Konig Brock for their very patient, long term help in finally + identifying this issue. + + .. change:: + :tags: bug, postgresql, mssql + :tickets: 8770 + + For the PostgreSQL and SQL Server dialects only, adjusted the compiler so + that when rendering column expressions in the RETURNING clause, the "non + anon" label that's used in SELECT statements is suggested for SQL + expression elements that generate a label; the primary example is a SQL + function that may be emitting as part of the column's type, where the label + name should match the column's name by default. This restores a not-well + defined behavior that had changed in version 1.4.21 due to :ticket:`6718`, + :ticket:`6710`. The Oracle dialect has a different RETURNING implementation + and was not affected by this issue. Version 2.0 features an across the + board change for its widely expanded support of RETURNING on other + backends. + + + .. change:: + :tags: bug, oracle + + Fixed issue in the Oracle dialect where an INSERT statement that used + ``insert(some_table).values(...).returning(some_table)`` against a full + :class:`.Table` object at once would fail to execute, raising an exception. + + .. 
change:: + :tags: bug, tests + :tickets: 8793 + + Fixed issue where the ``--disable-asyncio`` parameter to the test suite + would fail to not actually run greenlet tests and would also not prevent + the suite from using a "wrapping" greenlet for the whole suite. This + parameter now ensures that no greenlet or asyncio use will occur within the + entire run when set. + + .. change:: + :tags: bug, tests + + Adjusted the test suite which tests the Mypy plugin to accommodate for + changes in Mypy 0.990 regarding how it handles message output, which affect + how sys.path is interpreted when determining if notes and errors should be + printed for particular files. The change broke the test suite as the files + within the test directory itself no longer produced messaging when run + under the mypy API. .. changelog:: :version: 1.4.43 diff --git a/doc/build/changelog/unreleased_14/8770.rst b/doc/build/changelog/unreleased_14/8770.rst deleted file mode 100644 index 8968b0361ee..00000000000 --- a/doc/build/changelog/unreleased_14/8770.rst +++ /dev/null @@ -1,23 +0,0 @@ -.. change:: - :tags: bug, postgresql, mssql - :tickets: 8770 - - For the PostgreSQL and SQL Server dialects only, adjusted the compiler so - that when rendering column expressions in the RETURNING clause, the "non - anon" label that's used in SELECT statements is suggested for SQL - expression elements that generate a label; the primary example is a SQL - function that may be emitting as part of the column's type, where the label - name should match the column's name by default. This restores a not-well - defined behavior that had changed in version 1.4.21 due to :ticket:`6718`, - :ticket:`6710`. The Oracle dialect has a different RETURNING implementation - and was not affected by this issue. Version 2.0 features an across the - board change for its widely expanded support of RETURNING on other - backends. - - -.. 
change:: - :tags: bug, oracle - - Fixed issue in the Oracle dialect where an INSERT statement that used - ``insert(some_table).values(...).returning(some_table)`` against a full - :class:`.Table` object at once would fail to execute, raising an exception. diff --git a/doc/build/changelog/unreleased_14/8790.rst b/doc/build/changelog/unreleased_14/8790.rst deleted file mode 100644 index a3214801c8e..00000000000 --- a/doc/build/changelog/unreleased_14/8790.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 8790 - - Fixed critical memory issue identified in cache key generation, where for - very large and complex ORM statements that make use of lots of ORM aliases - with subqueries, cache key generation could produce excessively large keys - that were orders of magnitude bigger than the statement itself. Much thanks - to Rollo Konig Brock for their very patient, long term help in finally - identifying this issue. diff --git a/doc/build/changelog/unreleased_14/8793.rst b/doc/build/changelog/unreleased_14/8793.rst deleted file mode 100644 index 36f1003ccce..00000000000 --- a/doc/build/changelog/unreleased_14/8793.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, tests - :tickets: 8793 - - Fixed issue where the ``--disable-asyncio`` parameter to the test suite - would fail to not actually run greenlet tests and would also not prevent - the suite from using a "wrapping" greenlet for the whole suite. This - parameter now ensures that no greenlet or asyncio use will occur within the - entire run when set. diff --git a/doc/build/changelog/unreleased_14/mypy_fixes.rst b/doc/build/changelog/unreleased_14/mypy_fixes.rst deleted file mode 100644 index 32e4f14658f..00000000000 --- a/doc/build/changelog/unreleased_14/mypy_fixes.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. 
change:: - :tags: bug, tests - - Adjusted the test suite which tests the Mypy plugin to accommodate for - changes in Mypy 0.990 regarding how it handles message output, which affect - how sys.path is interpreted when determining if notes and errors should be - printed for particular files. The change broke the test suite as the files - within the test directory itself no longer produced messaging when run - under the mypy API. diff --git a/doc/build/conf.py b/doc/build/conf.py index 4f9550225bd..baa10ab30eb 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -213,9 +213,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. -release = "1.4.43" +release = "1.4.44" -release_date = "November 4, 2022" +release_date = "November 12, 2022" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From 5b96720309c30c2a89d9d58ba03d7a7c3bd233f8 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 12 Nov 2022 09:13:37 -0500 Subject: [PATCH 436/632] Version 1.4.45 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index ef0b83c3b4b..273947fcfaa 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.45 + :include_notes_from: unreleased_14 + .. 
changelog:: :version: 1.4.44 :released: November 12, 2022 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index 9449096d985..be8a8ebc077 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.44" +__version__ = "1.4.45" def __go(lcls): From 3d892381996e7ba68ee3bf6d7de40c8414e3812a Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Sat, 12 Nov 2022 20:15:26 +0100 Subject: [PATCH 437/632] Repair test in 32bit python builds. Change-Id: I8287f3e1a975534c8a01a41c9dcc7e5e9f08bb52 (cherry picked from commit 9f4ac8d155f58b59cf314cfbc73195ed51a0c146) --- lib/sqlalchemy/testing/requirements.py | 4 ++++ lib/sqlalchemy/util/__init__.py | 1 + lib/sqlalchemy/util/compat.py | 1 + test/orm/test_cache_key.py | 6 +++++- 4 files changed, 11 insertions(+), 1 deletion(-) diff --git a/lib/sqlalchemy/testing/requirements.py b/lib/sqlalchemy/testing/requirements.py index 069ff747dbb..8cd4c64f27b 100644 --- a/lib/sqlalchemy/testing/requirements.py +++ b/lib/sqlalchemy/testing/requirements.py @@ -1312,6 +1312,10 @@ def cpython(self): lambda: util.cpython, "cPython interpreter needed" ) + @property + def is64bit(self): + return exclusions.only_if(lambda: util.is64bit, "64bit required") + @property def patch_library(self): def check_lib(): diff --git a/lib/sqlalchemy/util/__init__.py b/lib/sqlalchemy/util/__init__.py index d6ce6490344..c34f73dcaef 100644 --- a/lib/sqlalchemy/util/__init__.py +++ b/lib/sqlalchemy/util/__init__.py @@ -64,6 +64,7 @@ from .compat import has_refcount_gc from .compat import inspect_getfullargspec from .compat import int_types +from .compat import is64bit from .compat import iterbytes from .compat import itertools_filter from .compat import itertools_filterfalse diff --git a/lib/sqlalchemy/util/compat.py b/lib/sqlalchemy/util/compat.py index 2c2a1a77ae8..2b5a2c0ef42 100644 --- a/lib/sqlalchemy/util/compat.py +++ b/lib/sqlalchemy/util/compat.py @@ -29,6 +29,7 
@@ win32 = sys.platform.startswith("win") osx = sys.platform.startswith("darwin") arm = "aarch" in platform.machine().lower() +is64bit = platform.architecture()[0] == "64bit" has_refcount_gc = bool(cpython) diff --git a/test/orm/test_cache_key.py b/test/orm/test_cache_key.py index 169df909ec5..6720baf024f 100644 --- a/test/orm/test_cache_key.py +++ b/test/orm/test_cache_key.py @@ -1087,7 +1087,11 @@ class Engineer(Employee): "concrete": True, } - @testing.combinations("tuples", "memory", argnames="assert_on") + Base.registry.configure() + + @testing.combinations( + "tuples", ("memory", testing.requires.is64bit), argnames="assert_on" + ) def test_cache_key_gen(self, assert_on): Employee = self.classes.Employee From bf6adda95461d4c3c2076d39850df2403a157662 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 14 Nov 2022 13:18:45 -0500 Subject: [PATCH 438/632] avoid putting annotated columns in sets backporting a small bit of the changes made for the 2.0 version of #8796. See if the changes apply cleanly to the 1.4 branch. 
Fixes: #8796 Change-Id: I8118511a10beb38c545a55c962a18a77611293af --- lib/sqlalchemy/sql/base.py | 4 ++-- lib/sqlalchemy/sql/elements.py | 8 ++++---- test/sql/test_selectable.py | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/lib/sqlalchemy/sql/base.py b/lib/sqlalchemy/sql/base.py index ec685d1fac1..4519e649ba6 100644 --- a/lib/sqlalchemy/sql/base.py +++ b/lib/sqlalchemy/sql/base.py @@ -1430,7 +1430,7 @@ def embedded(expanded_proxy_set, target_set): operator.add, [ sc._annotations.get("weight", 1) - for sc in col._uncached_proxy_set() + for sc in col._uncached_proxy_list() if sc.shares_lineage(column) ], ) @@ -1438,7 +1438,7 @@ def embedded(expanded_proxy_set, target_set): operator.add, [ sc._annotations.get("weight", 1) - for sc in c._uncached_proxy_set() + for sc in c._uncached_proxy_list() if sc.shares_lineage(column) ], ) diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index 72486e749ab..d438e5995cd 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -902,21 +902,21 @@ def base_columns(self): @util.memoized_property def proxy_set(self): - s = util.column_set([self]) + s = util.column_set([self._deannotate()]) for c in self._proxies: s.update(c.proxy_set) return s - def _uncached_proxy_set(self): + def _uncached_proxy_list(self): """An 'uncached' version of proxy set. This is so that we can read annotations from the list of columns without breaking the caching of the above proxy_set. 
""" - s = util.column_set([self]) + s = [self] for c in self._proxies: - s.update(c._uncached_proxy_set()) + s.extend(c._uncached_proxy_list()) return s def shares_lineage(self, othercolumn): diff --git a/test/sql/test_selectable.py b/test/sql/test_selectable.py index a3f7b7c4682..c29d9d5a50a 100644 --- a/test/sql/test_selectable.py +++ b/test/sql/test_selectable.py @@ -2975,7 +2975,7 @@ def test_proxy_set_iteration_includes_annotated(self): # proxy_set, as corresponding_column iterates through proxy_set # in this way d = {} - for col in p2._uncached_proxy_set(): + for col in p2._uncached_proxy_list(): d.update(col._annotations) eq_(d, {"weight": 10}) @@ -2991,7 +2991,7 @@ def test_proxy_set_iteration_includes_annotated_two(self): proxy._proxies = [c1._annotate({"weight": 10})] d = {} - for col in proxy._uncached_proxy_set(): + for col in proxy._uncached_proxy_list(): d.update(col._annotations) eq_(d, {"weight": 10}) From 3ea8273c7926d19d4745bf5a859927838fa0783c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 14 Nov 2022 08:54:56 -0500 Subject: [PATCH 439/632] add informative exception context for literal render An informative re-raise is now thrown in the case where any "literal bindparam" render operation fails, indicating the value itself and the datatype in use, to assist in debugging when literal params are being rendered in a statement. 
Fixes: #8800 Change-Id: Id658f8b03359312353ddbb0c7563026239579f7b (cherry picked from commit c7baf6e0aa624c9378c3bc3c4923d1e188d62dc9) --- doc/build/changelog/unreleased_14/8800.rst | 8 ++++ lib/sqlalchemy/sql/compiler.py | 29 ++++++++++++-- lib/sqlalchemy/sql/sqltypes.py | 7 +--- lib/sqlalchemy/sql/util.py | 6 +++ test/sql/test_compiler.py | 46 ++++++++++++++++++++++ test/sql/test_types.py | 7 ++-- test/sql/test_values.py | 3 +- 7 files changed, 92 insertions(+), 14 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8800.rst diff --git a/doc/build/changelog/unreleased_14/8800.rst b/doc/build/changelog/unreleased_14/8800.rst new file mode 100644 index 00000000000..8a42975df74 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8800.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: usecase, sql + :tickets: 8800 + + An informative re-raise is now thrown in the case where any "literal + bindparam" render operation fails, indicating the value itself and + the datatype in use, to assist in debugging when literal params + are being rendered in a statement. diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index a7232f096d6..611cd182187 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -38,6 +38,7 @@ from . import schema from . import selectable from . import sqltypes +from . 
import util as sql_util from .base import NO_ARG from .base import prefix_anon_map from .elements import quoted_name @@ -1216,7 +1217,8 @@ def _process_parameters_for_postcompile( replacement_expressions[ escaped_name ] = self.render_literal_bindparam( - parameter, render_literal_value=value + parameter, + render_literal_value=value, ) continue @@ -2590,10 +2592,29 @@ def render_literal_value(self, value, type_): processor = type_._cached_literal_processor(self.dialect) if processor: - return processor(value) + try: + return processor(value) + except Exception as e: + util.raise_( + exc.CompileError( + "Could not render literal value " + '"%s" ' + "with datatype " + "%s; see parent stack trace for " + "more detail." + % ( + sql_util._repr_single_value(value), + type_, + ) + ), + from_=e, + ) + else: - raise NotImplementedError( - "Don't know how to literal-quote value %r" % value + raise exc.CompileError( + "No literal value renderer is available for literal value " + '"%s" with datatype %s' + % (sql_util._repr_single_value(value), type_) ) def _truncate_bindparam(self, bindparam): diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index 4a988755cd2..c2b8bbbe4a0 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ -3262,12 +3262,7 @@ class NullType(TypeEngine): _isnull = True def literal_processor(self, dialect): - def process(value): - raise exc.CompileError( - "Don't know how to render literal SQL value: %r" % (value,) - ) - - return process + return None class Comparator(TypeEngine.Comparator): def _adapt_expression(self, op, other_comparator): diff --git a/lib/sqlalchemy/sql/util.py b/lib/sqlalchemy/sql/util.py index 019b29e3d1e..1a5143fa586 100644 --- a/lib/sqlalchemy/sql/util.py +++ b/lib/sqlalchemy/sql/util.py @@ -484,6 +484,12 @@ def trunc(self, value): return rep +def _repr_single_value(value): + rp = _repr_base() + rp.max_chars = 300 + return rp.trunc(value) + + class _repr_row(_repr_base): 
"""Provide a string view of a row.""" diff --git a/test/sql/test_compiler.py b/test/sql/test_compiler.py index 5953c6449e4..831ef188720 100644 --- a/test/sql/test_compiler.py +++ b/test/sql/test_compiler.py @@ -98,6 +98,7 @@ from sqlalchemy.testing import mock from sqlalchemy.testing import ne_ from sqlalchemy.testing.schema import pep435_enum +from sqlalchemy.types import UserDefinedType from sqlalchemy.util import u table1 = table( @@ -4519,6 +4520,51 @@ def test_multiple_col_binds(self): "OR mytable.myid = :myid_2 OR mytable.myid = :myid_3", ) + @testing.combinations("plain", "expanding", argnames="exprtype") + def test_literal_bind_typeerror(self, exprtype): + """test #8800""" + + if exprtype == "expanding": + stmt = select(table1).where( + table1.c.myid.in_([("tuple",), ("tuple",)]) + ) + elif exprtype == "plain": + stmt = select(table1).where(table1.c.myid == ("tuple",)) + else: + assert False + + with expect_raises_message( + exc.CompileError, + r"Could not render literal value \"\(\'tuple\',\)\" " + r"with datatype INTEGER; see parent " + r"stack trace for more detail.", + ): + stmt.compile(compile_kwargs={"literal_binds": True}) + + @testing.combinations("plain", "expanding", argnames="exprtype") + def test_literal_bind_dont_know_how_to_quote(self, exprtype): + """test #8800""" + + class MyType(UserDefinedType): + def get_col_spec(self, **kw): + return "MYTYPE" + + col = column("x", MyType()) + + if exprtype == "expanding": + stmt = select(table1).where(col.in_([("tuple",), ("tuple",)])) + elif exprtype == "plain": + stmt = select(table1).where(col == ("tuple",)) + else: + assert False + + with expect_raises_message( + exc.CompileError, + r"No literal value renderer is available for literal " + r"value \"\('tuple',\)\" with datatype MYTYPE", + ): + stmt.compile(compile_kwargs={"literal_binds": True}) + @testing.fixture def ansi_compiler_fixture(self): dialect = default.DefaultDialect() diff --git a/test/sql/test_types.py b/test/sql/test_types.py index 
c4f2f27260a..4fdbcf95116 100644 --- a/test/sql/test_types.py +++ b/test/sql/test_types.py @@ -3615,7 +3615,8 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL): def test_compile_err_formatting(self): with expect_raises_message( exc.CompileError, - r"Don't know how to render literal SQL value: \(1, 2, 3\)", + r"No literal value renderer is available for literal " + r"value \"\(1, 2, 3\)\" with datatype NULL", ): func.foo((1, 2, 3)).compile(compile_kwargs={"literal_binds": True}) @@ -4230,8 +4231,8 @@ def test_render_datetime(self, value): lit = literal(value) assert_raises_message( - NotImplementedError, - "Don't know how to literal-quote value.*", + exc.CompileError, + r"No literal value renderer is available for literal value.*", lit.compile, dialect=testing.db.dialect, compile_kwargs={"literal_binds": True}, diff --git a/test/sql/test_values.py b/test/sql/test_values.py index dcd32a6791a..1c5e0a1fbb6 100644 --- a/test/sql/test_values.py +++ b/test/sql/test_values.py @@ -277,7 +277,8 @@ def test_use_cols_tricky_not_every_type_given( with expect_raises_message( exc.CompileError, - "Don't know how to render literal SQL value: 'textA'", + r"No literal value renderer is available for literal " + r"value \"'textA'\" with datatype NULL", ): str(stmt) From b5ee0c7ff566291ce5823ba3e0ea624946c966b5 Mon Sep 17 00:00:00 2001 From: Eitan Mosenkis Date: Mon, 14 Nov 2022 23:11:15 +0200 Subject: [PATCH 440/632] Explicitly state what happens if `order_by` is called more than once. (#8791) * Explicitly state what happens if `order_by` is called more than once. The existing docs cover how to clear existing `order_by` clauses but don't actually describe the behavior of calling `order_by` multiple times with different clauses. * Also update Select.order_by. 
(cherry picked from commit 9237bf15e612ba82555444751bd69dc2a831e7f4) --- lib/sqlalchemy/orm/query.py | 7 ++++--- lib/sqlalchemy/sql/selectable.py | 7 ++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index 65b6bf81a04..cef98201f3d 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -1819,9 +1819,10 @@ def order_by(self, *clauses): q = session.query(Entity).order_by(Entity.id, Entity.name) - All existing ORDER BY criteria may be cancelled by passing - ``None`` by itself. New ORDER BY criteria may then be added by - invoking :meth:`_orm.Query.order_by` again, e.g.:: + Calling this method multiple times is equivalent to calling it once + with all the clauses concatenated. All existing ORDER BY criteria may + be cancelled by passing ``None`` by itself. New ORDER BY criteria may + then be added by invoking :meth:`_orm.Query.order_by` again, e.g.:: # will erase all ORDER BY and ORDER BY new_col alone q = q.order_by(None).order_by(new_col) diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index 956f8ae8d8a..f8252cbe468 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -3876,9 +3876,10 @@ def order_by(self, *clauses): stmt = select(table).order_by(table.c.id, table.c.name) - All existing ORDER BY criteria may be cancelled by passing - ``None`` by itself. New ORDER BY criteria may then be added by - invoking :meth:`_sql.Select.order_by` again, e.g.:: + Calling this method multiple times is equivalent to calling it once + with all the clauses concatenated. All existing ORDER BY criteria may + be cancelled by passing ``None`` by itself. 
New ORDER BY criteria may + then be added by invoking :meth:`_orm.Query.order_by` again, e.g.:: # will erase all ORDER BY and ORDER BY new_col alone stmt = stmt.order_by(None).order_by(new_col) From 9050bec33b8531c800e12d469b2613dbc909b895 Mon Sep 17 00:00:00 2001 From: Andy Garfield Date: Tue, 15 Nov 2022 11:28:58 -0500 Subject: [PATCH 441/632] Small tutorial rewording The language is this sentence took me a few reads to understand. This is just a rewording. (cherry picked from commit 36954a3dbb3a528d81c24db6b8698ba1e48d7cb2) Change-Id: I3d61653b682035bd616f80ed54c83e959cfb74fc --- doc/build/tutorial/dbapi_transactions.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/tutorial/dbapi_transactions.rst b/doc/build/tutorial/dbapi_transactions.rst index cf93534e4fe..634b1f6cdcd 100644 --- a/doc/build/tutorial/dbapi_transactions.rst +++ b/doc/build/tutorial/dbapi_transactions.rst @@ -474,7 +474,7 @@ the block with a "commit as you go" commit. .. tip:: The :class:`_orm.Session` doesn't actually hold onto the :class:`_future.Connection` object after it ends the transaction. It gets a new :class:`_future.Connection` from the :class:`_future.Engine` - when executing SQL against the database is next needed. + the next time it needs to execute SQL against the database. The :class:`_orm.Session` obviously has a lot more tricks up its sleeve than that, however understanding that it has a :meth:`_orm.Session.execute` From bf5061cb53d45482581e8f0a22c4f892ee53e639 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 16 Nov 2022 20:11:18 -0500 Subject: [PATCH 442/632] accommodate NULL format_type() Made an adjustment to how the PostgreSQL dialect considers column types when it reflects columns from a table, to accommodate for alternative backends which may return NULL from the PG ``format_type()`` function. 
Fixes: #8748 Change-Id: I6178287aac567210a76afaa5805b825daa7fa4db (cherry picked from commit 200e70b9745f1f344be4a35bb8f2b5f01b40d467) --- doc/build/changelog/unreleased_14/8748.rst | 7 +++++++ lib/sqlalchemy/dialects/postgresql/base.py | 23 +++++++++++++++++----- test/dialect/postgresql/test_reflection.py | 17 ++++++++++++++++ 3 files changed, 42 insertions(+), 5 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8748.rst diff --git a/doc/build/changelog/unreleased_14/8748.rst b/doc/build/changelog/unreleased_14/8748.rst new file mode 100644 index 00000000000..27e06792276 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8748.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: bug, postgresql + :tickets: 8748 + + Made an adjustment to how the PostgreSQL dialect considers column types + when it reflects columns from a table, to accommodate for alternative + backends which may return NULL from the PG ``format_type()`` function. diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index b980183d007..c390553353a 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -3975,12 +3975,19 @@ def _handle_array_type(attype): attype.endswith("[]"), ) - # strip (*) from character varying(5), timestamp(5) - # with time zone, geometry(POLYGON), etc. - attype = re.sub(r"\(.*\)", "", format_type) + if format_type is None: + no_format_type = True + attype = format_type = "no format_type()" + is_array = False + else: + no_format_type = False + + # strip (*) from character varying(5), timestamp(5) + # with time zone, geometry(POLYGON), etc. + attype = re.sub(r"\(.*\)", "", format_type) - # strip '[]' from integer[], etc. and check if an array - attype, is_array = _handle_array_type(attype) + # strip '[]' from integer[], etc. 
and check if an array + attype, is_array = _handle_array_type(attype) # strip quotes from case sensitive enum or domain names enum_or_domain_key = tuple(util.quoted_token_parser(attype)) @@ -4073,6 +4080,12 @@ def _handle_array_type(attype): coltype = coltype(*args, **kwargs) if is_array: coltype = self.ischema_names["_array"](coltype) + elif no_format_type: + util.warn( + "PostgreSQL format_type() returned NULL for column '%s'" + % (name,) + ) + coltype = sqltypes.NULLTYPE else: util.warn( "Did not recognize type '%s' of column '%s'" % (attype, name) diff --git a/test/dialect/postgresql/test_reflection.py b/test/dialect/postgresql/test_reflection.py index bf8cd511116..f33b3bde454 100644 --- a/test/dialect/postgresql/test_reflection.py +++ b/test/dialect/postgresql/test_reflection.py @@ -37,8 +37,10 @@ from sqlalchemy.testing.assertions import assert_warns from sqlalchemy.testing.assertions import AssertsExecutionResults from sqlalchemy.testing.assertions import eq_ +from sqlalchemy.testing.assertions import expect_warnings from sqlalchemy.testing.assertions import is_ from sqlalchemy.testing.assertions import is_true +from sqlalchemy.types import NullType class ReflectionFixtures(object): @@ -1822,6 +1824,21 @@ def test_instancelevel(self): dialect.ischema_names["my_custom_type"] = self.CustomType self._assert_reflected(dialect) + def test_no_format_type(self): + """test #8748""" + + dialect = postgresql.PGDialect() + dialect.ischema_names = dialect.ischema_names.copy() + dialect.ischema_names["my_custom_type"] = self.CustomType + + with expect_warnings( + r"PostgreSQL format_type\(\) returned NULL for column 'colname'" + ): + column_info = dialect._get_column_info( + "colname", None, None, False, {}, {}, "public", None, "", None + ) + assert isinstance(column_info["type"], NullType) + class IntervalReflectionTest(fixtures.TestBase): __only_on__ = "postgresql" From 34014cac6d67a6c374ba6426abca8023e9f3fb55 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 19 Nov 
2022 13:10:31 -0500 Subject: [PATCH 443/632] clarify role of `__allow_unmapped__` A different note will be needed in the 1.4 version of these docs. Fixes: #8845 Change-Id: I48651c64d511684077c80a317349dd230424b575 (cherry picked from commit 46e6693cb3db445f18aa25d5e4ca613504bd12b3) --- doc/build/changelog/migration_20.rst | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/migration_20.rst b/doc/build/changelog/migration_20.rst index b33a5b0e676..f7009a28d57 100644 --- a/doc/build/changelog/migration_20.rst +++ b/doc/build/changelog/migration_20.rst @@ -387,7 +387,7 @@ SQLAlchemy 2.0 has new support for runtime interpretation of :pep:`484` typing a on ORM models. A requirement of these annotations is that they must make use of the :class:`_orm.Mapped` generic container. Annotations which don't use :class:`_orm.Mapped` which link to constructs such as :func:`_orm.relationship` -will raise errors, as they suggest mis-configurations. +will raise errors in Python, as they suggest mis-configurations. SQLAlchemy applications that use the :ref:`Mypy plugin ` with explicit annotations that don't use :class:`_orm.Mapped` in their annotations @@ -424,6 +424,15 @@ the ``__allow_unmapped__`` attribute may be used on the class or any subclasses, which will cause the annotations in these cases to be ignored completely by the new Declarative system. +.. note:: The ``__allow_unmapped__`` directive applies **only** to the + *runtime* behavior of the ORM. It does not affect the behavior of + Mypy, and the above mapping as written still requires that the Mypy + plugin be installed. For fully 2.0 style ORM models that will type + correctly under Mypy *without* a plugin, see the section named + "Migrating an Existing Mapping" in the "What's New in SQLAlchemy 2.0?" + document of the SQLAlchemy 2.0 documentation; this is the SQLAlchemy + 1.4 documentation. 
+ The example below illustrates the application of ``__allow_unmapped__`` to the Declarative ``Base`` class, where it will take effect for all classes that descend from ``Base``:: From d44d3ae3bb11be35d4f207f17c8dcee1b229dad4 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Tue, 22 Nov 2022 00:07:53 +0100 Subject: [PATCH 444/632] Add security warning to serializer extension Change-Id: I5c7c076bc93fc250c05f7996e83359d19d1f3214 (cherry picked from commit 447249e8628ff849758c1a9cdf822ae060b7cb8b) --- lib/sqlalchemy/ext/serializer.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/sqlalchemy/ext/serializer.py b/lib/sqlalchemy/ext/serializer.py index 094b71b0039..2b2ee79779b 100644 --- a/lib/sqlalchemy/ext/serializer.py +++ b/lib/sqlalchemy/ext/serializer.py @@ -14,6 +14,11 @@ form, but are instead re-associated with the query structure when it is deserialized. +.. warning:: The serializer extension uses pickle to serialize and + deserialize objects, so the same security consideration mentioned + in the `python documentation + `_ apply. + Usage is nearly the same as that of the standard Python pickle module:: from sqlalchemy.ext.serializer import loads, dumps From 17313f90e12c30a17048790914f13f8b32c3e86d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 23 Nov 2022 17:44:30 -0500 Subject: [PATCH 445/632] flake8 has very courteously referred us to another pyqca project and closed all comments, thank you! I'll get on that right away pyqca/flake8-import-order does not seem to have a release or a commit in the past two years, so while I have created an issue and PR [1] [2], for now vendor our fork so we can get on with things. 
Also fix one issue for flake8 6.0 [1] https://github.com/PyCQA/flake8-import-order/issues/189 [2] https://github.com/PyCQA/flake8-import-order/pull/190 Change-Id: I53045f65b8716cceaf2104fccc1d26f80b398fef References: https://github.com/PyCQA/flake8/issues/1751 (cherry picked from commit fbec926c4744aa97a48a011939354c8b8f8be566) --- tox.ini | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 275cb351174..87105998e56 100644 --- a/tox.ini +++ b/tox.ini @@ -147,7 +147,8 @@ commands = basepython = python3 deps= flake8 - flake8-import-order + #flake8-import-order + git+https://github.com/sqlalchemyorg/flake8-import-order@fix_options flake8-builtins flake8-docstrings>=1.3.1 flake8-rst-docstrings From 36914ae33a9d3357c7ad2d219e5af1031891be3c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 23 Nov 2022 10:58:28 -0500 Subject: [PATCH 446/632] add "merge" to viewonly cascades; propagate NO_RAISE when merging Fixed bug where :meth:`_orm.Session.merge` would fail to preserve the current loaded contents of relationship attributes that were indicated with the :paramref:`_orm.relationship.viewonly` parameter, thus defeating strategies that use :meth:`_orm.Session.merge` to pull fully loaded objects from caches and other similar techniques. In a related change, fixed issue where an object that contains a loaded relationship that was nonetheless configured as ``lazy='raise'`` on the mapping would fail when passed to :meth:`_orm.Session.merge`; checks for "raise" are now suspended within the merge process assuming the :paramref:`_orm.Session.merge.load` parameter remains at its default of ``True``. Overall, this is a behavioral adjustment to a change introduced in the 1.4 series as of :ticket:`4994`, which took "merge" out of the set of cascades applied by default to "viewonly" relationships. 
As "viewonly" relationships aren't persisted under any circumstances, allowing their contents to transfer during "merge" does not impact the persistence behavior of the target object. This allows :meth:`_orm.Session.merge` to correctly suit one of its use cases, that of adding objects to a :class:`.Session` that were loaded elsewhere, often for the purposes of restoring from a cache. Fixes: #8862 Change-Id: I8731c7810460e6a71f8bf5e8ded59142b9b02956 (cherry picked from commit 1e009bf086a42134190030f07068bc463e9a9794) --- doc/build/changelog/unreleased_14/8862.rst | 24 ++++ lib/sqlalchemy/orm/attributes.py | 8 +- lib/sqlalchemy/orm/base.py | 6 + lib/sqlalchemy/orm/relationships.py | 13 ++- lib/sqlalchemy/orm/util.py | 2 +- test/orm/test_cascade.py | 36 ++++-- test/orm/test_merge.py | 123 +++++++++++++++++++++ 7 files changed, 200 insertions(+), 12 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8862.rst diff --git a/doc/build/changelog/unreleased_14/8862.rst b/doc/build/changelog/unreleased_14/8862.rst new file mode 100644 index 00000000000..3be00789096 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8862.rst @@ -0,0 +1,24 @@ +.. change:: + :tags: bug, orm + :tickets: 8862 + + Fixed bug where :meth:`_orm.Session.merge` would fail to preserve the + current loaded contents of relationship attributes that were indicated with + the :paramref:`_orm.relationship.viewonly` parameter, thus defeating + strategies that use :meth:`_orm.Session.merge` to pull fully loaded objects + from caches and other similar techniques. In a related change, fixed issue + where an object that contains a loaded relationship that was nonetheless + configured as ``lazy='raise'`` on the mapping would fail when passed to + :meth:`_orm.Session.merge`; checks for "raise" are now suspended within + the merge process assuming the :paramref:`_orm.Session.merge.load` + parameter remains at its default of ``True``. 
+ + Overall, this is a behavioral adjustment to a change introduced in the 1.4 + series as of :ticket:`4994`, which took "merge" out of the set of cascades + applied by default to "viewonly" relationships. As "viewonly" relationships + aren't persisted under any circumstances, allowing their contents to + transfer during "merge" does not impact the persistence behavior of the + target object. This allows :meth:`_orm.Session.merge` to correctly suit one + of its use cases, that of adding objects to a :class:`.Session` that were + loaded elsewhere, often for the purposes of restoring from a cache. + diff --git a/lib/sqlalchemy/orm/attributes.py b/lib/sqlalchemy/orm/attributes.py index efa20fb1cd1..37c7d70235c 100644 --- a/lib/sqlalchemy/orm/attributes.py +++ b/lib/sqlalchemy/orm/attributes.py @@ -1585,7 +1585,13 @@ def set( self.dispatch.bulk_replace(state, new_values, evt) - old = self.get(state, dict_, passive=PASSIVE_ONLY_PERSISTENT) + # propagate NO_RAISE in passive through to the get() for the + # existing object (ticket #8862) + old = self.get( + state, + dict_, + passive=PASSIVE_ONLY_PERSISTENT ^ (passive & NO_RAISE), + ) if old is PASSIVE_NO_RESULT: old = self._default_value(state, dict_) elif old is orig_iterable: diff --git a/lib/sqlalchemy/orm/base.py b/lib/sqlalchemy/orm/base.py index 8e94d7b3845..c2f87b54a1a 100644 --- a/lib/sqlalchemy/orm/base.py +++ b/lib/sqlalchemy/orm/base.py @@ -162,6 +162,12 @@ canonical=PASSIVE_OFF ^ NON_PERSISTENT_OK, ) +PASSIVE_MERGE = util.symbol( + "PASSIVE_OFF | NO_RAISE", + "Symbol used specifically for session.merge() and similar cases", + canonical=PASSIVE_OFF | NO_RAISE, +) + DEFAULT_MANAGER_ATTR = "_sa_class_manager" DEFAULT_STATE_ATTR = "_sa_instance_state" diff --git a/lib/sqlalchemy/orm/relationships.py b/lib/sqlalchemy/orm/relationships.py index 9a6cfb68cc5..4e3664a0cb9 100644 --- a/lib/sqlalchemy/orm/relationships.py +++ b/lib/sqlalchemy/orm/relationships.py @@ -21,6 +21,7 @@ from . 
import attributes from .base import _is_mapped_class +from .base import PASSIVE_MERGE from .base import state_str from .interfaces import MANYTOMANY from .interfaces import MANYTOONE @@ -1048,7 +1049,7 @@ class that will be synchronized with this one. It is usually if cascade is not False: self.cascade = cascade elif self.viewonly: - self.cascade = "none" + self.cascade = "merge" else: self.cascade = "save-update, merge" @@ -1904,7 +1905,9 @@ def merge( # map for those already present. # also assumes CollectionAttributeImpl behavior of loading # "old" list in any case - dest_state.get_impl(self.key).get(dest_state, dest_dict) + dest_state.get_impl(self.key).get( + dest_state, dest_dict, passive=PASSIVE_MERGE + ) dest_list = [] for current in instances_iterable: @@ -1929,7 +1932,11 @@ def merge( coll.append_without_event(c) else: dest_state.get_impl(self.key).set( - dest_state, dest_dict, dest_list, _adapt=False + dest_state, + dest_dict, + dest_list, + _adapt=False, + passive=PASSIVE_MERGE, ) else: current = source_dict[self.key] diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index 265f62660f8..1a9699a0e07 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -70,7 +70,7 @@ class CascadeOptions(frozenset): ) _allowed_cascades = all_cascades - _viewonly_cascades = ["expunge", "all", "none", "refresh-expire"] + _viewonly_cascades = ["expunge", "all", "none", "refresh-expire", "merge"] __slots__ = ( "save_update", diff --git a/test/orm/test_cascade.py b/test/orm/test_cascade.py index c5dd946e75e..dd23f84377e 100644 --- a/test/orm/test_cascade.py +++ b/test/orm/test_cascade.py @@ -4296,7 +4296,7 @@ class Order(cls.Comparable): ({"delete"}, {"delete"}), ( {"all, delete-orphan"}, - {"delete", "delete-orphan", "merge", "save-update"}, + {"delete", "delete-orphan", "save-update"}, ), ({"save-update, expunge"}, {"save-update"}), ) @@ -4403,7 +4403,10 @@ def test_default_none_cascade(self): not_in(o1, sess) not_in(o2, sess) - def 
test_default_merge_cascade(self): + @testing.combinations( + "persistent", "pending", argnames="collection_status" + ) + def test_default_merge_cascade(self, collection_status): User, Order, orders, users = ( self.classes.User, self.classes.Order, @@ -4435,12 +4438,31 @@ def test_default_merge_cascade(self): Order(id=2, user_id=1, description="someotherorder"), ) - u1.orders.append(o1) - u1.orders.append(o2) + if collection_status == "pending": + # technically this is pointless, one should not be appending + # to this collection + u1.orders.append(o1) + u1.orders.append(o2) + elif collection_status == "persistent": + sess.add(u1) + sess.flush() + sess.add_all([o1, o2]) + sess.flush() + u1.orders + else: + assert False u1 = sess.merge(u1) - assert not u1.orders + # in 1.4, as of #4993 this was asserting that u1.orders would + # not be present in the new object. However, as observed during + # #8862, this defeats schemes that seek to restore fully loaded + # objects from caches which may even have lazy="raise", but + # in any case would want to not emit new SQL on those collections. 
+ # so we assert here that u1.orders is in fact present + assert "orders" in u1.__dict__ + assert u1.__dict__["orders"] + assert u1.orders def test_default_cascade(self): User, Order, orders, users = ( @@ -4466,7 +4488,7 @@ def test_default_cascade(self): }, ) - eq_(umapper.attrs["orders"].cascade, set()) + eq_(umapper.attrs["orders"].cascade, {"merge"}) def test_write_cascade_disallowed_w_viewonly(self): @@ -4474,7 +4496,7 @@ def test_write_cascade_disallowed_w_viewonly(self): assert_raises_message( sa_exc.ArgumentError, - 'Cascade settings "delete, delete-orphan, merge, save-update" ' + 'Cascade settings "delete, delete-orphan, save-update" ' "apply to persistence operations", relationship, Order, diff --git a/test/orm/test_merge.py b/test/orm/test_merge.py index 3e29d5cd796..86277550083 100644 --- a/test/orm/test_merge.py +++ b/test/orm/test_merge.py @@ -7,6 +7,7 @@ from sqlalchemy import ForeignKey from sqlalchemy import Integer from sqlalchemy import PickleType +from sqlalchemy import select from sqlalchemy import String from sqlalchemy import testing from sqlalchemy import Text @@ -16,6 +17,7 @@ from sqlalchemy.orm import defer from sqlalchemy.orm import deferred from sqlalchemy.orm import foreign +from sqlalchemy.orm import joinedload from sqlalchemy.orm import relationship from sqlalchemy.orm import selectinload from sqlalchemy.orm import Session @@ -28,6 +30,7 @@ from sqlalchemy.testing import fixtures from sqlalchemy.testing import in_ from sqlalchemy.testing import not_in +from sqlalchemy.testing.assertsql import CountStatements from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.schema import Column from sqlalchemy.testing.schema import Table @@ -1396,6 +1399,126 @@ def test_no_load_preserves_parents(self): except sa.exc.InvalidRequestError as e: assert "load=False option does not support" in str(e) + @testing.combinations("viewonly", "normal", argnames="viewonly") + @testing.combinations("load", "noload", argnames="load") + 
@testing.combinations("select", "raise", "raise_on_sql", argnames="lazy") + @testing.combinations( + "merge_persistent", "merge_detached", argnames="merge_persistent" + ) + @testing.combinations("detached", "persistent", argnames="detach_original") + @testing.combinations("o2m", "m2o", argnames="direction") + def test_relationship_population_maintained( + self, + viewonly, + load, + lazy, + merge_persistent, + direction, + detach_original, + ): + """test #8862""" + + User, Address = self.classes("User", "Address") + users, addresses = self.tables("users", "addresses") + + self.mapper_registry.map_imperatively( + User, + users, + properties={ + "addresses": relationship( + Address, + viewonly=viewonly == "viewonly", + lazy=lazy, + back_populates="user", + ) + }, + ) + + self.mapper_registry.map_imperatively( + Address, + addresses, + properties={ + "user": relationship( + User, + viewonly=viewonly == "viewonly", + lazy=lazy, + back_populates="addresses", + ) + }, + ) + + s = fixture_session() + + u1 = User(id=1, name="u1") + s.add(u1) + s.flush() + s.add_all( + [Address(user_id=1, email_address="e%d" % i) for i in range(1, 4)] + ) + s.commit() + + if direction == "o2m": + cls_to_merge = User + obj_to_merge = ( + s.scalars(select(User).options(joinedload(User.addresses))) + .unique() + .one() + ) + attrname = "addresses" + + elif direction == "m2o": + cls_to_merge = Address + obj_to_merge = ( + s.scalars( + select(Address) + .filter_by(email_address="e1") + .options(joinedload(Address.user)) + ) + .unique() + .one() + ) + attrname = "user" + else: + assert False + + assert attrname in obj_to_merge.__dict__ + + s2 = Session(testing.db) + + if merge_persistent == "merge_persistent": + target_persistent = s2.get(cls_to_merge, obj_to_merge.id) # noqa + + if detach_original == "detach": + s.expunge(obj_to_merge) + + with self.sql_execution_asserter(testing.db) as assert_: + merged_object = s2.merge(obj_to_merge, load=load == "load") + + assert_.assert_( + CountStatements( 
+ 0 + if load == "noload" + else 1 + if merge_persistent == "merge_persistent" + else 2 + ) + ) + + assert attrname in merged_object.__dict__ + + with self.sql_execution_asserter(testing.db) as assert_: + if direction == "o2m": + eq_( + merged_object.addresses, + [ + Address(user_id=1, email_address="e%d" % i) + for i in range(1, 4) + ], + ) + elif direction == "m2o": + eq_(merged_object.user, User(id=1, name="u1")) + assert_.assert_(CountStatements(0)) + def test_synonym(self): users = self.tables.users From 7a0a76271793ffc3ded684a98927fe84258b9500 Mon Sep 17 00:00:00 2001 From: Michael Gorven Date: Thu, 24 Nov 2022 03:47:26 -0500 Subject: [PATCH 447/632] Fix reflection of constraints in attached schemas Backported a fix for SQLite reflection of unique constraints in attached schemas, released in 2.0 as a small part of :ticket:`4379`. Previously, unique constraints in attached schemas would be ignored by SQLite reflection. Pull request courtesy Michael Gorven. Fixes: #8866 Closes: #8867 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/8867 Pull-request-sha: 94a5736170f5c944d3dad1ef91dc8550c72a4dc5 Change-Id: Id414aeed9d6ce58877d81df2459f6d4f308750a8 --- doc/build/changelog/unreleased_14/8866.rst | 8 ++++++++ lib/sqlalchemy/dialects/sqlite/base.py | 2 +- test/dialect/test_sqlite.py | 24 +++++++++++++++++++++- 3 files changed, 32 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8866.rst diff --git a/doc/build/changelog/unreleased_14/8866.rst b/doc/build/changelog/unreleased_14/8866.rst new file mode 100644 index 00000000000..0b82e8d3038 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8866.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, sqlite + :tickets: 8866 + + Backported a fix for SQLite reflection of unique constraints in attached + schemas, released in 2.0 as a small part of :ticket:`4379`. Previously, + unique constraints in attached schemas would be ignored by SQLite + reflection. 
Pull request courtesy Michael Gorven. diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index 0959d0417cf..612d8f90632 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -2487,7 +2487,7 @@ def get_indexes(self, connection, table_name, schema=None, **kw): # loop thru unique indexes to get the column names. for idx in list(indexes): pragma_index = self._get_table_pragma( - connection, "index_info", idx["name"] + connection, "index_info", idx["name"], schema=schema ) for row in pragma_index: diff --git a/test/dialect/test_sqlite.py b/test/dialect/test_sqlite.py index ff98fea149b..1f7a06dffb5 100644 --- a/test/dialect/test_sqlite.py +++ b/test/dialect/test_sqlite.py @@ -827,7 +827,7 @@ def _fixture(self): Table( "another_created", meta, - Column("bat", Integer), + Column("bat", Integer, unique=True), Column("hoho", String), schema="test_schema", ) @@ -909,6 +909,28 @@ def test_table_names_system(self): {"created", "another_created"}, ) + def test_unique_constraints(self): + self._fixture() + insp = inspect(self.conn) + eq_( + [ + d["column_names"] + for d in insp.get_unique_constraints( + "created", schema="test_schema" + ) + ], + [], + ) + eq_( + [ + d["column_names"] + for d in insp.get_unique_constraints( + "another_created", schema="test_schema" + ) + ], + [["bat"]], + ) + def test_schema_names(self): self._fixture() insp = inspect(self.conn) From efbd314b9cdb1e39cd68320373bfcb7df8d7f338 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 24 Nov 2022 09:52:12 -0500 Subject: [PATCH 448/632] assert unique constraints received back in #8867 we can see our existing uq reflection test is broken, not detecting a failure to detect constraints Change-Id: Icada02bc0547c5a3d8c471b80a78a2e72f02647d (cherry picked from commit f99300c4e8d1317a94cbfeaec6fe22de8f1159f7) --- lib/sqlalchemy/testing/suite/test_reflection.py | 2 ++ 1 file changed, 2 insertions(+) diff --git 
a/lib/sqlalchemy/testing/suite/test_reflection.py b/lib/sqlalchemy/testing/suite/test_reflection.py index 3f234d2ea9c..12949fe02bb 100644 --- a/lib/sqlalchemy/testing/suite/test_reflection.py +++ b/lib/sqlalchemy/testing/suite/test_reflection.py @@ -1079,6 +1079,8 @@ def test_get_unique_constraints(self, metadata, connection, use_schema): names_that_duplicate_index = set() + eq_(len(uniques), len(reflected)) + for orig, refl in zip(uniques, reflected): # Different dialects handle duplicate index and constraints # differently, so ignore this flag From f4cf0f93f4bc1cbb5dc3ba2ed7a7f43a575e6d91 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 26 Nov 2022 10:14:02 -0500 Subject: [PATCH 449/632] sort for addresess relationship in new merge test saw a random failure under py2 on 1.4, will backport Change-Id: I3e2b037bf4211be44e28f85f5e51ffdc218eeb5a (cherry picked from commit 34e29f7b225cf1305e151af9d03ef95f42a9dbcc) --- test/orm/test_merge.py | 1 + 1 file changed, 1 insertion(+) diff --git a/test/orm/test_merge.py b/test/orm/test_merge.py index 86277550083..67307ed6cfd 100644 --- a/test/orm/test_merge.py +++ b/test/orm/test_merge.py @@ -1430,6 +1430,7 @@ def test_relationship_population_maintained( viewonly=viewonly == "viewonly", lazy=lazy, back_populates="user", + order_by=addresses.c.id, ) }, ) From 93afb76519380bb86fe5ecf62e75ddfd62c337e4 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 25 Nov 2022 16:49:28 -0500 Subject: [PATCH 450/632] improve column targeting issues with query_expression Fixed issues in :func:`_orm.with_expression` where expressions that were composed of columns within a subquery being SELECTed from, or when using ``.from_statement()``, would not render correct SQL **if** the expression had a label name that matched the attribute which used :func:`_orm.query_expression`, even when :func:`_orm.query_expression` had no default expression. 
For the moment, if the :func:`_orm.query_expression` **does** have a default expression, that label name is still used for that default, and an additional label with the same name will be ignored. Overall, this case is pretty thorny so further adjustments might be warranted. Fixes: #8881 Change-Id: Ie939b1470cb2e824717384be65f4cd8edd619942 (cherry picked from commit 474326e87038f997fb9423c56379b8ba19a5e43b) --- doc/build/changelog/unreleased_14/8881.rst | 14 ++ lib/sqlalchemy/orm/properties.py | 3 + test/orm/test_core_compilation.py | 89 +++++++++++- test/orm/test_deferred.py | 156 ++++++++++++++++++++- 4 files changed, 259 insertions(+), 3 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8881.rst diff --git a/doc/build/changelog/unreleased_14/8881.rst b/doc/build/changelog/unreleased_14/8881.rst new file mode 100644 index 00000000000..f3fe5e66e74 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8881.rst @@ -0,0 +1,14 @@ +.. change:: + :tags: bug, orm + :tickets: 8881 + + Fixed issues in :func:`_orm.with_expression` where expressions that were + composed of columns within a subquery being SELECTed from, or when using + ``.from_statement()``, would not render correct SQL **if** the expression + had a label name that matched the attribute which used + :func:`_orm.query_expression`, even when :func:`_orm.query_expression` had + no default expression. For the moment, if the :func:`_orm.query_expression` + **does** have a default expression, that label name is still used for that + default, and an additional label with the same name will be ignored. + Overall, this case is pretty thorny so further adjustments might be + warranted. 
diff --git a/lib/sqlalchemy/orm/properties.py b/lib/sqlalchemy/orm/properties.py index d32af17464c..19a18173f78 100644 --- a/lib/sqlalchemy/orm/properties.py +++ b/lib/sqlalchemy/orm/properties.py @@ -197,6 +197,9 @@ def __init__(self, *columns, **kwargs): self.strategy_key += (("raiseload", True),) def _memoized_attr__renders_in_subqueries(self): + if ("query_expression", True) in self.strategy_key: + return self.strategy._have_default_expression + return ("deferred", True) not in self.strategy_key or ( self not in self.parent._readonly_props ) diff --git a/test/orm/test_core_compilation.py b/test/orm/test_core_compilation.py index c0c530b4c07..16bdbf2fd4d 100644 --- a/test/orm/test_core_compilation.py +++ b/test/orm/test_core_compilation.py @@ -7,6 +7,7 @@ from sqlalchemy import insert from sqlalchemy import inspect from sqlalchemy import Integer +from sqlalchemy import literal from sqlalchemy import literal_column from sqlalchemy import null from sqlalchemy import or_ @@ -933,6 +934,10 @@ def query_expression_fixture(self): properties=util.OrderedDict( [ ("value", query_expression()), + ( + "value_w_default", + query_expression(default_expr=literal(15)), + ), ] ), ) @@ -940,6 +945,24 @@ def query_expression_fixture(self): return User + @testing.fixture + def deferred_fixture(self): + User = self.classes.User + users = self.tables.users + + self.mapper_registry.map_imperatively( + User, + users, + properties={ + "name": deferred(users.c.name), + "name_upper": column_property( + func.upper(users.c.name), deferred=True + ), + }, + ) + + return User + @testing.fixture def query_expression_w_joinedload_fixture(self): users, User = ( @@ -1080,10 +1103,71 @@ def test_with_expr_one(self, query_expression_fixture): self.assert_compile( stmt, - "SELECT users.name || :name_1 AS anon_1, users.id, " + "SELECT users.name || :name_1 AS anon_1, :param_1 AS anon_2, " + "users.id, " "users.name FROM users", ) + def test_exported_columns_query_expression(self, 
query_expression_fixture): + """test behaviors related to #8881""" + User = query_expression_fixture + + stmt = select(User) + + eq_( + stmt.selected_columns.keys(), + ["value_w_default", "id", "name"], + ) + + stmt = select(User).options( + with_expression(User.value, User.name + "foo") + ) + + # bigger problem. we still don't include 'value', because we dont + # run query options here. not "correct", but is at least consistent + # with deferred + eq_( + stmt.selected_columns.keys(), + ["value_w_default", "id", "name"], + ) + + def test_exported_columns_colprop(self, column_property_fixture): + """test behaviors related to #8881""" + User, _ = column_property_fixture + + stmt = select(User) + + # we get all the cols because they are not deferred and have a value + eq_( + stmt.selected_columns.keys(), + ["concat", "count", "id", "name"], + ) + + def test_exported_columns_deferred(self, deferred_fixture): + """test behaviors related to #8881""" + User = deferred_fixture + + stmt = select(User) + + # don't include 'name_upper' as it's deferred and readonly. 
+ # "name" however is a column on the table, so even though it is + # deferred, it gets special treatment (related to #6661) + eq_( + stmt.selected_columns.keys(), + ["name", "id"], + ) + + stmt = select(User).options( + undefer(User.name), undefer(User.name_upper) + ) + + # undefer doesn't affect the readonly col because we dont look + # at options when we do selected_columns + eq_( + stmt.selected_columns.keys(), + ["name", "id"], + ) + def test_with_expr_two(self, query_expression_fixture): User = query_expression_fixture @@ -1096,7 +1180,8 @@ def test_with_expr_two(self, query_expression_fixture): self.assert_compile( stmt, - "SELECT anon_1.foo, anon_1.id, anon_1.name FROM " + "SELECT anon_1.foo, :param_1 AS anon_2, anon_1.id, " + "anon_1.name FROM " "(SELECT users.id AS id, users.name AS name, " "users.name || :name_1 AS foo FROM users) AS anon_1", ) diff --git a/test/orm/test_deferred.py b/test/orm/test_deferred.py index 7afaad1e9dc..dcf0d683400 100644 --- a/test/orm/test_deferred.py +++ b/test/orm/test_deferred.py @@ -6,6 +6,7 @@ from sqlalchemy import select from sqlalchemy import String from sqlalchemy import testing +from sqlalchemy import union_all from sqlalchemy import util from sqlalchemy.orm import aliased from sqlalchemy.orm import attributes @@ -1746,6 +1747,14 @@ class A(fixtures.ComparableEntity, Base): bs = relationship("B", order_by="B.id") + class A_default(fixtures.ComparableEntity, Base): + __tablename__ = "a_default" + id = Column(Integer, primary_key=True) + x = Column(Integer) + y = Column(Integer) + + my_expr = query_expression(default_expr=literal(15)) + class B(fixtures.ComparableEntity, Base): __tablename__ = "b" id = Column(Integer, primary_key=True) @@ -1764,7 +1773,7 @@ class C(fixtures.ComparableEntity, Base): @classmethod def insert_data(cls, connection): - A, B, C = cls.classes("A", "B", "C") + A, A_default, B, C = cls.classes("A", "A_default", "B", "C") s = Session(connection) s.add_all( @@ -1775,6 +1784,8 @@ def 
insert_data(cls, connection): A(id=4, x=2, y=10, bs=[B(id=4, p=19, q=8), B(id=5, p=5, q=5)]), C(id=1, x=1), C(id=2, x=2), + A_default(id=1, x=1, y=2), + A_default(id=2, x=2, y=3), ] ) @@ -1949,6 +1960,149 @@ def test_dont_explode_on_expire_whole(self): q.first() eq_(a1.my_expr, 5) + @testing.combinations("core", "orm", argnames="use_core") + @testing.combinations( + "from_statement", "aliased", argnames="use_from_statement" + ) + @testing.combinations( + "same_name", "different_name", argnames="use_same_labelname" + ) + @testing.combinations( + "has_default", "no_default", argnames="attr_has_default" + ) + def test_expr_from_subq_plain( + self, + use_core, + use_from_statement, + use_same_labelname, + attr_has_default, + ): + """test #8881""" + + if attr_has_default == "has_default": + A = self.classes.A_default + else: + A = self.classes.A + + s = fixture_session() + + if use_same_labelname == "same_name": + labelname = "my_expr" + else: + labelname = "hi" + + if use_core == "core": + stmt = select(A.__table__, literal(12).label(labelname)) + else: + stmt = select(A, literal(12).label(labelname)) + + if use_from_statement == "aliased": + subq = stmt.subquery() + a1 = aliased(A, subq) + stmt = select(a1).options( + with_expression(a1.my_expr, subq.c[labelname]) + ) + else: + subq = stmt + stmt = ( + select(A) + .options( + with_expression( + A.my_expr, subq.selected_columns[labelname] + ) + ) + .from_statement(subq) + ) + + a_obj = s.scalars(stmt).first() + + if ( + use_same_labelname == "same_name" + and attr_has_default == "has_default" + and use_core == "orm" + ): + eq_(a_obj.my_expr, 15) + else: + eq_(a_obj.my_expr, 12) + + @testing.combinations("core", "orm", argnames="use_core") + @testing.combinations( + "from_statement", "aliased", argnames="use_from_statement" + ) + @testing.combinations( + "same_name", "different_name", argnames="use_same_labelname" + ) + @testing.combinations( + "has_default", "no_default", argnames="attr_has_default" + ) + def 
test_expr_from_subq_union( + self, + use_core, + use_from_statement, + use_same_labelname, + attr_has_default, + ): + """test #8881""" + + if attr_has_default == "has_default": + A = self.classes.A_default + else: + A = self.classes.A + + s = fixture_session() + + if use_same_labelname == "same_name": + labelname = "my_expr" + else: + labelname = "hi" + + if use_core == "core": + stmt = union_all( + select(A.__table__, literal(12).label(labelname)).where( + A.__table__.c.id == 1 + ), + select(A.__table__, literal(18).label(labelname)).where( + A.__table__.c.id == 2 + ), + ) + + else: + stmt = union_all( + select(A, literal(12).label(labelname)).where(A.id == 1), + select(A, literal(18).label(labelname)).where(A.id == 2), + ) + + if use_from_statement == "aliased": + subq = stmt.subquery() + a1 = aliased(A, subq) + stmt = select(a1).options( + with_expression(a1.my_expr, subq.c[labelname]) + ) + else: + subq = stmt + stmt = ( + select(A) + .options( + with_expression( + A.my_expr, subq.selected_columns[labelname] + ) + ) + .from_statement(subq) + ) + + a_objs = s.scalars(stmt).all() + + if ( + use_same_labelname == "same_name" + and attr_has_default == "has_default" + and use_core == "orm" + ): + eq_(a_objs[0].my_expr, 15) + eq_(a_objs[1].my_expr, 15) + else: + eq_(a_objs[0].my_expr, 12) + eq_(a_objs[1].my_expr, 18) + class RaiseLoadTest(fixtures.DeclarativeMappedTest): @classmethod From 337dc2dd1ca2c982a1b0d7ac162e860bfb804366 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 26 Nov 2022 11:03:45 -0500 Subject: [PATCH 451/632] add new variation helper I'm using a lot of @testing.combinations with either a boolean True/False, or a series of string names, each indicating some case to switch on. I want a descriptive name in the test run (not True/False) and I don't want to compare strings. 
So make a new helper around @combinations that provides an object interface that has booleans inside of it, prints nicely in the test output, raises an error if you name the case incorrectly. Before: test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[False-False-both] PASSED test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[False-False-key] PASSED test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[False-False-name] PASSED test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[False-True-both] PASSED test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[False-True-key] PASSED test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[False-True-name] PASSED test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[True-False-both] PASSED test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[True-False-key] PASSED test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[True-False-name] PASSED test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[True-True-both] PASSED test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[True-True-key] PASSED test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[True-True-name] PASSED After: test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[not_use_add_property-deferred-both] PASSED test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[not_use_add_property-deferred-key] PASSED test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[not_use_add_property-deferred-name] PASSED test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[not_use_add_property-not_deferred-both] PASSED 
test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[not_use_add_property-not_deferred-key] PASSED test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[not_use_add_property-not_deferred-name] PASSED test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[use_add_property-deferred-both] PASSED test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[use_add_property-deferred-key] PASSED test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[use_add_property-deferred-name] PASSED test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[use_add_property-not_deferred-both] PASSED test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[use_add_property-not_deferred-key] PASSED test/orm/declarative/test_typed_mapping.py::MappedColumnTest::test_separate_name[use_add_property-not_deferred-name] PASSED Change-Id: Idde87632581ee69e0f47360966758583dfd8baab (cherry picked from commit 3ffa8dccc224d7b7d604bdfb684c437f4cb42f92) --- lib/sqlalchemy/testing/__init__.py | 1 + lib/sqlalchemy/testing/config.py | 75 ++++++++++++++++++++++++++++++ test/orm/test_merge.py | 40 ++++++++-------- 3 files changed, 96 insertions(+), 20 deletions(-) diff --git a/lib/sqlalchemy/testing/__init__.py b/lib/sqlalchemy/testing/__init__.py index 73b43f04bd4..7d47210452d 100644 --- a/lib/sqlalchemy/testing/__init__.py +++ b/lib/sqlalchemy/testing/__init__.py @@ -50,6 +50,7 @@ from .config import fixture from .config import requirements as requires from .config import skip_test +from .config import variation from .exclusions import _is_excluded from .exclusions import _server_version from .exclusions import against as _against diff --git a/lib/sqlalchemy/testing/config.py b/lib/sqlalchemy/testing/config.py index fc13a165579..7d19b99be5e 100644 --- a/lib/sqlalchemy/testing/config.py +++ b/lib/sqlalchemy/testing/config.py @@ 
-94,6 +94,81 @@ def combinations_list(arg_iterable, **kw): return combinations(*arg_iterable, **kw) +class _variation_base(object): + __slots__ = ("name", "argname") + + def __init__(self, case, argname, case_names): + self.name = case + self.argname = argname + for casename in case_names: + setattr(self, casename, casename == case) + + def __bool__(self): + return self.name == self.argname + + def __nonzero__(self): + return not self.__bool__() + + +def variation(argname, cases): + """a helper around testing.combinations that provides a single namespace + that can be used as a switch. + + e.g.:: + + @testing.variation("querytyp", ["select", "subquery", "legacy_query"]) + @testing.variation("lazy", ["select", "raise", "raise_on_sql"]) + def test_thing( + self, + querytyp, + lazy, + decl_base + ): + class Thing(decl_base): + __tablename__ = 'thing' + + # use name directly + rel = relationship("Rel", lazy=lazy.name) + + # use as a switch + if querytyp.select: + stmt = select(Thing) + elif querytyp.subquery: + stmt = select(Thing).subquery() + elif querytyp.legacy_query: + stmt = Session.query(Thing) + else: + assert False + + + The variable provided is a slots object of boolean variables, as well + as the name of the case itself under the attribute ".name" + + """ + + case_names = [ + argname if c is True else "not_" + argname if c is False else c + for c in cases + ] + + typ = type( + argname, + (_variation_base,), + { + "__slots__": tuple(case_names), + }, + ) + + return combinations( + *[ + (casename, typ(casename, argname, case_names)) + for casename in case_names + ], + id_="ia", + argnames=argname + ) + + def fixture(*arg, **kw): return _fixture_functions.fixture(*arg, **kw) diff --git a/test/orm/test_merge.py b/test/orm/test_merge.py index 67307ed6cfd..4f3b4e49561 100644 --- a/test/orm/test_merge.py +++ b/test/orm/test_merge.py @@ -1399,14 +1399,14 @@ def test_no_load_preserves_parents(self): except sa.exc.InvalidRequestError as e: assert "load=False option 
does not support" in str(e) - @testing.combinations("viewonly", "normal", argnames="viewonly") - @testing.combinations("load", "noload", argnames="load") - @testing.combinations("select", "raise", "raise_on_sql", argnames="lazy") - @testing.combinations( - "merge_persistent", "merge_detached", argnames="merge_persistent" + @testing.variation("viewonly", ["viewonly", "normal"]) + @testing.variation("load", ["load", "noload"]) + @testing.variation("lazy", ["select", "raise", "raise_on_sql"]) + @testing.variation( + "merge_persistent", ["merge_persistent", "merge_detached"] ) - @testing.combinations("detached", "persistent", argnames="detach_original") - @testing.combinations("o2m", "m2o", argnames="direction") + @testing.variation("detach_original", ["detach", "persistent"]) + @testing.variation("direction", ["o2m", "m2o"]) def test_relationship_population_maintained( self, viewonly, @@ -1427,8 +1427,8 @@ def test_relationship_population_maintained( properties={ "addresses": relationship( Address, - viewonly=viewonly == "viewonly", - lazy=lazy, + viewonly=viewonly.viewonly, + lazy=lazy.name, back_populates="user", order_by=addresses.c.id, ) @@ -1441,8 +1441,8 @@ def test_relationship_population_maintained( properties={ "user": relationship( User, - viewonly=viewonly == "viewonly", - lazy=lazy, + viewonly=viewonly.viewonly, + lazy=lazy.name, back_populates="addresses", ) }, @@ -1458,7 +1458,7 @@ def test_relationship_population_maintained( ) s.commit() - if direction == "o2m": + if direction.o2m: cls_to_merge = User obj_to_merge = ( s.scalars(select(User).options(joinedload(User.addresses))) @@ -1467,7 +1467,7 @@ def test_relationship_population_maintained( ) attrname = "addresses" - elif direction == "m2o": + elif direction.m2o: cls_to_merge = Address obj_to_merge = ( s.scalars( @@ -1486,21 +1486,21 @@ def test_relationship_population_maintained( s2 = Session(testing.db) - if merge_persistent == "merge_persistent": + if merge_persistent.merge_persistent: 
target_persistent = s2.get(cls_to_merge, obj_to_merge.id) # noqa - if detach_original == "detach": + if detach_original.detach: s.expunge(obj_to_merge) with self.sql_execution_asserter(testing.db) as assert_: - merged_object = s2.merge(obj_to_merge, load=load == "load") + merged_object = s2.merge(obj_to_merge, load=load.load) assert_.assert_( CountStatements( 0 - if load == "noload" + if load.noload else 1 - if merge_persistent == "merge_persistent" + if merge_persistent.merge_persistent else 2 ) ) @@ -1508,7 +1508,7 @@ def test_relationship_population_maintained( assert attrname in merged_object.__dict__ with self.sql_execution_asserter(testing.db) as assert_: - if direction == "o2m": + if direction.o2m: eq_( merged_object.addresses, [ @@ -1516,7 +1516,7 @@ def test_relationship_population_maintained( for i in range(1, 4) ], ) - elif direction == "m2o": + elif direction.m2o: eq_(merged_object.user, User(id=1, name="u1")) assert_.assert_(CountStatements(0)) From fc72bf8edcf24c38945cfb27cb095ea2eeb68ccf Mon Sep 17 00:00:00 2001 From: Gord Thompson Date: Thu, 24 Nov 2022 10:38:20 -0700 Subject: [PATCH 452/632] Add recommendation for URL.create() re: escaping Let users know that URL.create() can build the whole connection URL instead of making them escape things like passwords ad-hoc. includes some general cleanup of URL docstring by mike Change-Id: Ic71bb0201fecf30e1db11e006c269f2d041b5439 (cherry picked from commit 14c73685ba909838fb90d762d465e7ae8d067c15) --- doc/build/core/engines.rst | 50 +++++++++++++++++++++++++++++++++++- lib/sqlalchemy/engine/url.py | 24 ++++++++++------- 2 files changed, 64 insertions(+), 10 deletions(-) diff --git a/doc/build/core/engines.rst b/doc/build/core/engines.rst index f27caa2d4f7..ba800d69830 100644 --- a/doc/build/core/engines.rst +++ b/doc/build/core/engines.rst @@ -79,7 +79,8 @@ known driver available for that backend. 
Escaping Special Characters such as @ signs in Passwords ---------------------------------------------------------- -As the URL is like any other URL, **special characters such as those that may +When constructing a fully formed URL string to pass to +:func:`_sa.create_engine`, **special characters such as those that may be used in the user and password need to be URL encoded to be parsed correctly.**. **This includes the @ sign**. @@ -99,12 +100,59 @@ The encoding for the above password can be generated using >>> urllib.parse.quote_plus("kx@jj5/g") 'kx%40jj5%2Fg' +The URL may then be passed as a string to :func:`_sa.create_engine`:: + + from sqlalchemy import create_engine + + engine = create_engine("postgresql+pg8000://dbuser:kx%40jj5%2Fg@pghost10/appdb") + +As an alternative to escaping special characters in order to create a complete +URL string, the object passed to :func:`_sa.create_engine` may instead be an +instance of the :class:`.URL` object, which bypasses the parsing +phase and can accommodate for unescaped strings directly. See the next +section for an example. + .. versionchanged:: 1.4 Support for ``@`` signs in hostnames and database names has been fixed. As a side effect of this fix, ``@`` signs in passwords must be escaped. +Creating URLs Programmatically +------------------------------- + +The value passed to :func:`_sa.create_engine` may be an instance of +:class:`.URL`, instead of a plain string, which bypasses the need for string +parsing to be used, and therefore does not need an escaped URL string to be +provided. + +The :class:`.URL` object is created using the :meth:`_engine.URL.create()` +constructor method, passing all fields individually. 
Special characters +such as those within passwords may be passed without any modification:: + + from sqlalchemy import URL + + url_object = URL.create( + "postgresql+pg8000", + username="dbuser", + password="kx@jj5/g", # plain (unescaped) text + host="pghost10", + database="appdb", + ) + +The constructed :class:`.URL` object may then be passed directly to +:func:`_sa.create_engine` in place of a string argument:: + + from sqlalchemy import create_engine + + engine = create_engine(url_object) + +.. seealso:: + + :class:`.URL` + + :meth:`.URL.create` + Backend-Specific URLs ---------------------- diff --git a/lib/sqlalchemy/engine/url.py b/lib/sqlalchemy/engine/url.py index a8138c1b48c..5b54d40d0ac 100644 --- a/lib/sqlalchemy/engine/url.py +++ b/lib/sqlalchemy/engine/url.py @@ -42,15 +42,21 @@ class URL( """ Represent the components of a URL used to connect to a database. - This object is suitable to be passed directly to a - :func:`_sa.create_engine` call. The fields of the URL are parsed from a - string by the :func:`.make_url` function. The string format of the URL - generally follows `RFC-1738 `_, with - some exceptions. - - To create a new :class:`_engine.URL` object, use the - :func:`_engine.url.make_url` function. To construct a :class:`_engine.URL` - programmatically, use the :meth:`_engine.URL.create` constructor. + URLs are typically constructed from a fully formatted URL string, where the + :func:`.make_url` function is used internally by the + :func:`_sa.create_engine` function in order to parse the URL string into + its individual components, which are then used to construct a new + :class:`.URL` object. When parsing from a formatted URL string, the parsing + format generally follows + `RFC-1738 `_, with some exceptions. 
+ + A :class:`_engine.URL` object may also be produced directly, either by + using the :func:`.make_url` function with a fully formed URL string, or + by using the :meth:`_engine.URL.create` constructor in order + to construct a :class:`_engine.URL` programmatically given individual + fields. The resulting :class:`.URL` object may be passed directly to + :func:`_sa.create_engine` in place of a string argument, which will bypass + the usage of :func:`.make_url` within the engine's creation process. .. versionchanged:: 1.4 From 5cb7bf8137becbc40d12ffd4dcaf49d23913a926 Mon Sep 17 00:00:00 2001 From: Tobias Pfeiffer Date: Mon, 28 Nov 2022 07:52:31 -0500 Subject: [PATCH 453/632] add partial index predicate to SQLiteDialect.get_indexes() result Added support for reflection of expression-oriented WHERE criteria included in indexes on the SQLite dialect, in a manner similar to that of the PostgreSQL dialect. Pull request courtesy Tobias Pfeiffer. Fixes: #8804 Closes: #8806 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/8806 Pull-request-sha: 539dfcb372360911b69aed2a804698bb1a2220b1 Change-Id: I0e34d47dbe2b9c1da6fce531363084843e5127a3 (cherry picked from commit ed39e846cd8ae2714c47fc3d563582f72483df0c) --- doc/build/changelog/unreleased_14/8804.rst | 7 +++ lib/sqlalchemy/dialects/sqlite/base.py | 51 ++++++++++++++++++- .../testing/suite/test_reflection.py | 13 +++-- test/dialect/test_sqlite.py | 51 +++++++++++++++++++ 4 files changed, 116 insertions(+), 6 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8804.rst diff --git a/doc/build/changelog/unreleased_14/8804.rst b/doc/build/changelog/unreleased_14/8804.rst new file mode 100644 index 00000000000..c3f91a16d2c --- /dev/null +++ b/doc/build/changelog/unreleased_14/8804.rst @@ -0,0 +1,7 @@ +.. 
change:: + :tags: usecase, sqlite + :tickets: 8804 + + Added support for reflection of expression-oriented WHERE criteria included + in indexes on the SQLite dialect, in a manner similar to that of the + PostgreSQL dialect. Pull request courtesy Tobias Pfeiffer. diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index 612d8f90632..24166717a41 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -821,6 +821,7 @@ def set_sqlite_pragma(dbapi_connection, connection_record): from ... import processors from ... import schema as sa_schema from ... import sql +from ... import text from ... import types as sqltypes from ... import util from ...engine import default @@ -2474,6 +2475,21 @@ def get_indexes(self, connection, table_name, schema=None, **kw): ) indexes = [] + # regular expression to extract the filter predicate of a partial + # index. this could fail to extract the predicate correctly on + # indexes created like + # CREATE INDEX i ON t (col || ') where') WHERE col <> '' + # but as this function does not support expression-based indexes + # this case does not occur. + partial_pred_re = re.compile(r"\)\s+where\s+(.+)", re.IGNORECASE) + + if schema: + schema_expr = "%s." % self.identifier_preparer.quote_identifier( + schema + ) + else: + schema_expr = "" + include_auto_indexes = kw.pop("include_auto_indexes", False) for row in pragma_indexes: # ignore implicit primary key index. @@ -2482,7 +2498,38 @@ def get_indexes(self, connection, table_name, schema=None, **kw): "sqlite_autoindex" ): continue - indexes.append(dict(name=row[1], column_names=[], unique=row[2])) + indexes.append( + dict( + name=row[1], + column_names=[], + unique=row[2], + dialect_options={}, + ) + ) + + # check partial indexes + if row[4]: + s = ( + "SELECT sql FROM %(schema)ssqlite_master " + "WHERE name = ? 
" + "AND type = 'index'" % {"schema": schema_expr} + ) + rs = connection.exec_driver_sql(s, (row[1],)) + index_sql = rs.scalar() + predicate_match = partial_pred_re.search(index_sql) + if predicate_match is None: + # unless the regex is broken this case shouldn't happen + # because we know this is a partial index, so the + # definition sql should match the regex + util.warn( + "Failed to look up filter predicate of " + "partial index %s" % row[1] + ) + else: + predicate = predicate_match.group(1) + indexes[-1]["dialect_options"]["sqlite_where"] = text( + predicate + ) # loop thru unique indexes to get the column names. for idx in list(indexes): @@ -2500,6 +2547,8 @@ def get_indexes(self, connection, table_name, schema=None, **kw): break else: idx["column_names"].append(row[2]) + + indexes.sort(key=lambda d: d["name"] or "~") # sort None as last return indexes @reflection.cache diff --git a/lib/sqlalchemy/testing/suite/test_reflection.py b/lib/sqlalchemy/testing/suite/test_reflection.py index 12949fe02bb..4e575046d37 100644 --- a/lib/sqlalchemy/testing/suite/test_reflection.py +++ b/lib/sqlalchemy/testing/suite/test_reflection.py @@ -1300,8 +1300,14 @@ def test_reflect_expression_based_indexes(self, metadata, connection): insp = inspect(connection) expected = [ - {"name": "t_idx_2", "column_names": ["x"], "unique": False} + { + "name": "t_idx_2", + "column_names": ["x"], + "unique": False, + "dialect_options": {}, + } ] + if testing.requires.index_reflects_included_columns.enabled: expected[0]["include_columns"] = [] expected[0]["dialect_options"] = { @@ -1311,10 +1317,7 @@ def test_reflect_expression_based_indexes(self, metadata, connection): with expect_warnings( "Skipped unsupported reflection of expression-based index t_idx" ): - eq_( - insp.get_indexes("t"), - expected, - ) + eq_(insp.get_indexes("t"), expected) @testing.requires.index_reflects_included_columns def test_reflect_covering_index(self, metadata, connection): diff --git a/test/dialect/test_sqlite.py 
b/test/dialect/test_sqlite.py index 1f7a06dffb5..3da4d6574b6 100644 --- a/test/dialect/test_sqlite.py +++ b/test/dialect/test_sqlite.py @@ -2312,6 +2312,7 @@ def test_dont_reflect_autoindex(self): "unique": 1, "name": "sqlite_autoindex_o_1", "column_names": ["foo"], + "dialect_options": {}, } ], ) @@ -2327,10 +2328,60 @@ def test_create_index_with_schema(self): "unique": 0, "name": u"ix_main_l_bar", "column_names": [u"bar"], + "dialect_options": {}, } ], ) + def test_reflect_partial_indexes(self, connection): + connection.exec_driver_sql( + "create table foo_with_partial_index (x integer, y integer)" + ) + connection.exec_driver_sql( + "create unique index ix_partial on " + "foo_with_partial_index (x) where y > 10" + ) + connection.exec_driver_sql( + "create unique index ix_no_partial on " + "foo_with_partial_index (x)" + ) + connection.exec_driver_sql( + "create unique index ix_partial2 on " + "foo_with_partial_index (x, y) where " + "y = 10 or abs(x) < 5" + ) + + inspector = inspect(connection) + indexes = inspector.get_indexes("foo_with_partial_index") + eq_( + indexes, + [ + { + "unique": 1, + "name": "ix_no_partial", + "column_names": ["x"], + "dialect_options": {}, + }, + { + "unique": 1, + "name": "ix_partial", + "column_names": ["x"], + "dialect_options": {"sqlite_where": mock.ANY}, + }, + { + "unique": 1, + "name": "ix_partial2", + "column_names": ["x", "y"], + "dialect_options": {"sqlite_where": mock.ANY}, + }, + ], + ) + eq_(indexes[1]["dialect_options"]["sqlite_where"].text, "y > 10") + eq_( + indexes[2]["dialect_options"]["sqlite_where"].text, + "y = 10 or abs(x) < 5", + ) + def test_unique_constraint_named(self): inspector = inspect(testing.db) eq_( From d57558ed88a14bab5f11be045779698f7886763f Mon Sep 17 00:00:00 2001 From: Jason Myers Date: Tue, 29 Nov 2022 12:12:12 -0600 Subject: [PATCH 454/632] Add PyAthena Dialect (#8898) * Add PyAthena Dialect * Changing order (cherry picked from commit 07d40578b1bb77456b7a7af425a905b146fdaef1) --- 
doc/build/dialects/index.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/build/dialects/index.rst b/doc/build/dialects/index.rst index f08f5197911..48c677da699 100644 --- a/doc/build/dialects/index.rst +++ b/doc/build/dialects/index.rst @@ -81,6 +81,8 @@ Currently maintained external dialect projects for SQLAlchemy include: +================================================+=======================================+ | Actian Avalanche, Vector, Actian X, and Ingres | sqlalchemy-ingres_ | +------------------------------------------------+---------------------------------------+ +| Amazon Athena | pyathena_ | ++------------------------------------------------+---------------------------------------+ | Amazon Redshift (via psycopg2) | sqlalchemy-redshift_ | +------------------------------------------------+---------------------------------------+ | Apache Drill | sqlalchemy-drill_ | @@ -157,3 +159,4 @@ Currently maintained external dialect projects for SQLAlchemy include: .. _sqlalchemy-turbodbc: https://pypi.org/project/sqlalchemy-turbodbc/ .. _sqlalchemy-sybase: https://pypi.org/project/sqlalchemy-sybase/ .. _firebolt-sqlalchemy: https://pypi.org/project/firebolt-sqlalchemy/ +.. _pyathena: https://github.com/laughingman7743/PyAthena/ From bd1620beb1adbdf9a9a0b15143bd62cb75d1b2de Mon Sep 17 00:00:00 2001 From: Michael Gorven Date: Tue, 29 Nov 2022 18:36:19 -0500 Subject: [PATCH 455/632] [sqlite] Reflect DEFERRABLE and INITIALLY options for foreign keys Added support for the SQLite backend to reflect the "DEFERRABLE" and "INITIALLY" keywords which may be present on a foreign key construct. Pull request courtesy Michael Gorven. 
Fixes: #8903 Closes: #8904 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/8904 Pull-request-sha: 52aa4cf77482c4051899e21bea75b9830e4c3efa Change-Id: I713906db1a458d8f1be39625841ca3bbc03ec835 (cherry picked from commit 07760011b5176be03c7811e9a45933b473b8b80b) --- doc/build/changelog/unreleased_14/8903.rst | 7 +++ lib/sqlalchemy/dialects/sqlite/base.py | 12 ++++- test/dialect/test_sqlite.py | 59 ++++++++++++++++++++++ 3 files changed, 77 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/8903.rst diff --git a/doc/build/changelog/unreleased_14/8903.rst b/doc/build/changelog/unreleased_14/8903.rst new file mode 100644 index 00000000000..fe1590c043f --- /dev/null +++ b/doc/build/changelog/unreleased_14/8903.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: usecase, sqlite + :tickets: 8903 + + Added support for the SQLite backend to reflect the "DEFERRABLE" and + "INITIALLY" keywords which may be present on a foreign key construct. Pull + request courtesy Michael Gorven. diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index 24166717a41..f75610553cb 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -2316,6 +2316,8 @@ def parse_fks(): r'REFERENCES +(?:(?:"(.+?)")|([a-z0-9_]+)) *\((.+?)\) *' r"((?:ON (?:DELETE|UPDATE) " r"(?:SET NULL|SET DEFAULT|CASCADE|RESTRICT|NO ACTION) *)*)" + r"((?:NOT +)?DEFERRABLE)?" + r"(?: +INITIALLY +(DEFERRED|IMMEDIATE))?" 
) for match in re.finditer(FK_PATTERN, table_data, re.I): ( @@ -2325,7 +2327,9 @@ def parse_fks(): referred_name, referred_columns, onupdatedelete, - ) = match.group(1, 2, 3, 4, 5, 6) + deferrable, + initially, + ) = match.group(1, 2, 3, 4, 5, 6, 7, 8) constrained_columns = list( self._find_cols_in_sig(constrained_columns) ) @@ -2347,6 +2351,12 @@ def parse_fks(): onupdate = token[6:].strip() if onupdate and onupdate != "NO ACTION": options["onupdate"] = onupdate + + if deferrable: + options["deferrable"] = "NOT" not in deferrable.upper() + if initially: + options["initially"] = initially.upper() + yield ( constraint_name, constrained_columns, diff --git a/test/dialect/test_sqlite.py b/test/dialect/test_sqlite.py index 3da4d6574b6..01ba4164803 100644 --- a/test/dialect/test_sqlite.py +++ b/test/dialect/test_sqlite.py @@ -1910,6 +1910,20 @@ def setup_test_class(cls): "ON UPDATE NO ACTION)" ) + conn.exec_driver_sql( + "CREATE TABLE deferrable_test (id INTEGER PRIMARY KEY, " + "c1 INTEGER, c2 INTEGER, c3 INTEGER, c4 INTEGER, " + "CONSTRAINT fk1 FOREIGN KEY (c1) REFERENCES a1(id) " + "DEFERRABLE," + "CONSTRAINT fk2 FOREIGN KEY (c2) REFERENCES a1(id) " + "NOT DEFERRABLE," + "CONSTRAINT fk3 FOREIGN KEY (c3) REFERENCES a2(id) " + "ON UPDATE CASCADE " + "DEFERRABLE INITIALLY DEFERRED," + "CONSTRAINT fk4 FOREIGN KEY (c4) REFERENCES a2(id) " + "NOT DEFERRABLE INITIALLY IMMEDIATE)" + ) + conn.exec_driver_sql( "CREATE TABLE cp (" "q INTEGER check (q > 1 AND q < 6),\n" @@ -2280,6 +2294,51 @@ def test_foreign_key_ondelete_onupdate(self): ], ) + def test_foreign_key_deferrable_initially(self): + inspector = inspect(testing.db) + fks = inspector.get_foreign_keys("deferrable_test") + eq_( + fks, + [ + { + "referred_table": "a1", + "referred_columns": ["id"], + "referred_schema": None, + "name": "fk1", + "constrained_columns": ["c1"], + "options": {"deferrable": True}, + }, + { + "referred_table": "a1", + "referred_columns": ["id"], + "referred_schema": None, + "name": "fk2", + 
"constrained_columns": ["c2"], + "options": {"deferrable": False}, + }, + { + "referred_table": "a2", + "referred_columns": ["id"], + "referred_schema": None, + "name": "fk3", + "constrained_columns": ["c3"], + "options": { + "deferrable": True, + "initially": "DEFERRED", + "onupdate": "CASCADE", + }, + }, + { + "referred_table": "a2", + "referred_columns": ["id"], + "referred_schema": None, + "name": "fk4", + "constrained_columns": ["c4"], + "options": {"deferrable": False, "initially": "IMMEDIATE"}, + }, + ], + ) + def test_foreign_key_options_unnamed_inline(self): with testing.db.begin() as conn: conn.exec_driver_sql( From 55ee628e9ef8e0e90786bbb550b124cf4b634f8a Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Sat, 19 Nov 2022 20:39:10 +0100 Subject: [PATCH 456/632] Fix positional compiling bugs Fixed a series of issues regarding positionally rendered bound parameters, such as those used for SQLite, asyncpg, MySQL and others. Some compiled forms would not maintain the order of parameters correctly, such as the PostgreSQL ``regexp_replace()`` function as well as within the "nesting" feature of the :class:`.CTE` construct first introduced in :ticket:`4123`. 
Fixes: #8827 Change-Id: I9813ed7c358cc5c1e26725c48df546b209a442cb (cherry picked from commit 0f2baae6bf72353f785bad394684f2d6fa53e0ef) --- doc/build/changelog/unreleased_14/8827.rst | 9 + lib/sqlalchemy/dialects/oracle/base.py | 22 +- lib/sqlalchemy/dialects/postgresql/base.py | 13 +- lib/sqlalchemy/sql/compiler.py | 109 ++++++++-- lib/sqlalchemy/testing/assertions.py | 64 +++++- test/dialect/oracle/test_compiler.py | 6 +- test/dialect/postgresql/test_compiler.py | 6 +- test/sql/test_compiler.py | 112 ++++++++++ test/sql/test_cte.py | 231 +++++++++++++++++++-- test/sql/test_external_traversal.py | 29 ++- test/sql/test_functions.py | 16 ++ 11 files changed, 543 insertions(+), 74 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8827.rst diff --git a/doc/build/changelog/unreleased_14/8827.rst b/doc/build/changelog/unreleased_14/8827.rst new file mode 100644 index 00000000000..677277e45d7 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8827.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, sql + :tickets: 8827 + + Fixed a series of issues regarding positionally rendered bound parameters, + such as those used for SQLite, asyncpg, MySQL and others. Some compiled + forms would not maintain the order of parameters correctly, such as the + PostgreSQL ``regexp_replace()`` function as well as within the "nesting" + feature of the :class:`.CTE` construct first introduced in :ticket:`4123`. 
diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py index 77f0dbd2df6..417ab84b7b7 100644 --- a/lib/sqlalchemy/dialects/oracle/base.py +++ b/lib/sqlalchemy/dialects/oracle/base.py @@ -941,7 +941,7 @@ def function_argspec(self, fn, **kw): def visit_function(self, func, **kw): text = super(OracleCompiler, self).visit_function(func, **kw) if kw.get("asfrom", False): - text = "TABLE (%s)" % func + text = "TABLE (%s)" % text return text def visit_table_valued_column(self, element, **kw): @@ -1270,20 +1270,18 @@ def visit_is_not_distinct_from_binary(self, binary, operator, **kw): self.process(binary.right), ) - def _get_regexp_args(self, binary, kw): + def visit_regexp_match_op_binary(self, binary, operator, **kw): string = self.process(binary.left, **kw) pattern = self.process(binary.right, **kw) flags = binary.modifiers["flags"] - if flags is not None: - flags = self.process(flags, **kw) - return string, pattern, flags - - def visit_regexp_match_op_binary(self, binary, operator, **kw): - string, pattern, flags = self._get_regexp_args(binary, kw) if flags is None: return "REGEXP_LIKE(%s, %s)" % (string, pattern) else: - return "REGEXP_LIKE(%s, %s, %s)" % (string, pattern, flags) + return "REGEXP_LIKE(%s, %s, %s)" % ( + string, + pattern, + self.process(flags, **kw), + ) def visit_not_regexp_match_op_binary(self, binary, operator, **kw): return "NOT %s" % self.visit_regexp_match_op_binary( @@ -1291,8 +1289,10 @@ def visit_not_regexp_match_op_binary(self, binary, operator, **kw): ) def visit_regexp_replace_op_binary(self, binary, operator, **kw): - string, pattern, flags = self._get_regexp_args(binary, kw) + string = self.process(binary.left, **kw) + pattern = self.process(binary.right, **kw) replacement = self.process(binary.modifiers["replacement"], **kw) + flags = binary.modifiers["flags"] if flags is None: return "REGEXP_REPLACE(%s, %s, %s)" % ( string, @@ -1304,7 +1304,7 @@ def visit_regexp_replace_op_binary(self, binary, 
operator, **kw): string, pattern, replacement, - flags, + self.process(flags, **kw), ) diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index c390553353a..9ad8379e26b 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -2386,14 +2386,11 @@ def _regexp_match(self, base_op, binary, operator, kw): return self._generate_generic_binary( binary, " %s* " % base_op, **kw ) - flags = self.process(flags, **kw) - string = self.process(binary.left, **kw) - pattern = self.process(binary.right, **kw) return "%s %s CONCAT('(?', %s, ')', %s)" % ( - string, + self.process(binary.left, **kw), base_op, - flags, - pattern, + self.process(flags, **kw), + self.process(binary.right, **kw), ) def visit_regexp_match_op_binary(self, binary, operator, **kw): @@ -2406,8 +2403,6 @@ def visit_regexp_replace_op_binary(self, binary, operator, **kw): string = self.process(binary.left, **kw) pattern = self.process(binary.right, **kw) flags = binary.modifiers["flags"] - if flags is not None: - flags = self.process(flags, **kw) replacement = self.process(binary.modifiers["replacement"], **kw) if flags is None: return "REGEXP_REPLACE(%s, %s, %s)" % ( @@ -2420,7 +2415,7 @@ def visit_regexp_replace_op_binary(self, binary, operator, **kw): string, pattern, replacement, - flags, + self.process(flags, **kw), ) def visit_empty_set_expr(self, element_types): diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 611cd182187..8fbf3092aaf 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -166,8 +166,8 @@ "named": ":%(name)s", } -_BIND_TRANSLATE_RE = re.compile(r"[%\(\):\[\]]") -_BIND_TRANSLATE_CHARS = dict(zip("%():[]", "PAZC__")) +_BIND_TRANSLATE_RE = re.compile(r"[%\(\):\[\] ]") +_BIND_TRANSLATE_CHARS = dict(zip("%():[] ", "PAZC___")) OPERATORS = { # binary @@ -713,6 +713,7 @@ class SQLCompiler(Compiled): debugging use cases. 
""" + positiontup_level = None inline = False @@ -784,6 +785,7 @@ def __init__( # true if the paramstyle is positional self.positional = dialect.positional if self.positional: + self.positiontup_level = {} self.positiontup = [] self._numeric_binds = dialect.paramstyle == "numeric" self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle] @@ -894,6 +896,8 @@ def _init_cte_state(self): self.ctes_recursive = False if self.positional: self.cte_positional = {} + self.cte_level = {} + self.cte_order = collections.defaultdict(list) @contextlib.contextmanager def _nested_result(self): @@ -1696,7 +1700,13 @@ def visit_textual_select( text = self.process(taf.element, **kw) if self.ctes: nesting_level = len(self.stack) if not toplevel else None - text = self._render_cte_clause(nesting_level=nesting_level) + text + text = ( + self._render_cte_clause( + nesting_level=nesting_level, + visiting_cte=kw.get("visiting_cte"), + ) + + text + ) self.stack.pop(-1) @@ -1806,6 +1816,7 @@ def _format_frame_clause(self, range_, **kw): ) def visit_over(self, over, **kwargs): + text = over.element._compiler_dispatch(self, **kwargs) if over.range_: range_ = "RANGE BETWEEN %s" % self._format_frame_clause( over.range_, **kwargs @@ -1818,7 +1829,7 @@ def visit_over(self, over, **kwargs): range_ = None return "%s OVER (%s)" % ( - over.element._compiler_dispatch(self, **kwargs), + text, " ".join( [ "%s BY %s" @@ -1964,7 +1975,9 @@ def visit_compound_select( nesting_level = len(self.stack) if not toplevel else None text = ( self._render_cte_clause( - nesting_level=nesting_level, include_following_stack=True + nesting_level=nesting_level, + include_following_stack=True, + visiting_cte=kwargs.get("visiting_cte"), ) + text ) @@ -2667,7 +2680,8 @@ def bindparam_string( positional_names.append(name) else: self.positiontup.append(name) - elif not escaped_from: + self.positiontup_level[name] = len(self.stack) + if not escaped_from: if _BIND_TRANSLATE_RE.search(name): # not quite the translate use case as we 
want to @@ -2786,6 +2800,8 @@ def visit_cte( ] } ) + if self.positional: + self.cte_level[cte] = cte_level if pre_alias_cte not in self.ctes: self.visit_cte(pre_alias_cte, **kwargs) @@ -3495,13 +3511,16 @@ def visit_select( if per_dialect: text += " " + self.get_statement_hint_text(per_dialect) - if self.ctes: - # In compound query, CTEs are shared at the compound level - if not is_embedded_select: - nesting_level = len(self.stack) if not toplevel else None - text = ( - self._render_cte_clause(nesting_level=nesting_level) + text + # In compound query, CTEs are shared at the compound level + if self.ctes and (not is_embedded_select or toplevel): + nesting_level = len(self.stack) if not toplevel else None + text = ( + self._render_cte_clause( + nesting_level=nesting_level, + visiting_cte=kwargs.get("visiting_cte"), ) + + text + ) if select_stmt._suffixes: text += " " + self._generate_prefixes( @@ -3677,6 +3696,7 @@ def _render_cte_clause( self, nesting_level=None, include_following_stack=False, + visiting_cte=None, ): """ include_following_stack @@ -3706,14 +3726,48 @@ def _render_cte_clause( if not ctes: return "" - ctes_recursive = any([cte.recursive for cte in ctes]) if self.positional: - self.positiontup = ( - sum([self.cte_positional[cte] for cte in ctes], []) - + self.positiontup - ) + self.cte_order[visiting_cte].extend(ctes) + + if visiting_cte is None and self.cte_order: + assert self.positiontup is not None + + def get_nested_positional(cte): + if cte in self.cte_order: + children = self.cte_order.pop(cte) + to_add = list( + itertools.chain.from_iterable( + get_nested_positional(child_cte) + for child_cte in children + ) + ) + if cte in self.cte_positional: + return reorder_positional( + self.cte_positional[cte], + to_add, + self.cte_level[children[0]], + ) + else: + return to_add + else: + return self.cte_positional.get(cte, []) + + def reorder_positional(pos, to_add, level): + if not level: + return to_add + pos + index = 0 + for index, name in 
enumerate(reversed(pos)): + if self.positiontup_level[name] < level: # type: ignore[index] # noqa: E501 + break + return pos[:-index] + to_add + pos[-index:] + + to_add = get_nested_positional(None) + self.positiontup = reorder_positional( + self.positiontup, to_add, nesting_level + ) + cte_text = self.get_cte_preamble(ctes_recursive) + " " cte_text += ", \n".join([txt for txt in ctes.values()]) cte_text += "\n " @@ -3985,6 +4039,7 @@ def visit_insert(self, insert_stmt, **kw): self._render_cte_clause( nesting_level=nesting_level, include_following_stack=True, + visiting_cte=kw.get("visiting_cte"), ), select_text, ) @@ -4022,7 +4077,9 @@ def visit_insert(self, insert_stmt, **kw): nesting_level = len(self.stack) if not toplevel else None text = ( self._render_cte_clause( - nesting_level=nesting_level, include_following_stack=True + nesting_level=nesting_level, + include_following_stack=True, + visiting_cte=kw.get("visiting_cte"), ) + text ) @@ -4162,7 +4219,13 @@ def visit_update(self, update_stmt, **kw): if self.ctes: nesting_level = len(self.stack) if not toplevel else None - text = self._render_cte_clause(nesting_level=nesting_level) + text + text = ( + self._render_cte_clause( + nesting_level=nesting_level, + visiting_cte=kw.get("visiting_cte"), + ) + + text + ) self.stack.pop(-1) @@ -4268,7 +4331,13 @@ def visit_delete(self, delete_stmt, **kw): if self.ctes: nesting_level = len(self.stack) if not toplevel else None - text = self._render_cte_clause(nesting_level=nesting_level) + text + text = ( + self._render_cte_clause( + nesting_level=nesting_level, + visiting_cte=kw.get("visiting_cte"), + ) + + text + ) self.stack.pop(-1) diff --git a/lib/sqlalchemy/testing/assertions.py b/lib/sqlalchemy/testing/assertions.py index ba6ee14c3b5..9a022265eb1 100644 --- a/lib/sqlalchemy/testing/assertions.py +++ b/lib/sqlalchemy/testing/assertions.py @@ -7,7 +7,9 @@ from __future__ import absolute_import +from collections import defaultdict import contextlib +from copy import 
copy import re import sys import warnings @@ -499,6 +501,7 @@ def assert_compile( render_schema_translate=False, default_schema_name=None, from_linting=False, + check_param_order=True, ): if use_default_dialect: dialect = default.DefaultDialect() @@ -512,8 +515,11 @@ def assert_compile( if dialect is None: dialect = config.db.dialect - elif dialect == "default": - dialect = default.DefaultDialect() + elif dialect == "default" or dialect == "default_qmark": + if dialect == "default": + dialect = default.DefaultDialect() + else: + dialect = default.DefaultDialect(paramstyle="qmark") dialect.supports_default_values = supports_default_values dialect.supports_default_metavalue = supports_default_metavalue elif dialect == "default_enhanced": @@ -645,7 +651,7 @@ def _compiler_dispatch(self, compiler, **kwargs): if checkparams is not None: eq_(c.construct_params(params), checkparams) if checkpositional is not None: - p = c.construct_params(params) + p = c.construct_params(params, escape_names=False) eq_(tuple([p[x] for x in c.positiontup]), checkpositional) if check_prefetch is not None: eq_(c.prefetch, check_prefetch) @@ -665,6 +671,58 @@ def _compiler_dispatch(self, compiler, **kwargs): }, check_post_param, ) + if check_param_order and getattr(c, "params", None): + + def get_dialect(paramstyle, positional): + cp = copy(dialect) + cp.paramstyle = paramstyle + cp.positional = positional + return cp + + pyformat_dialect = get_dialect("pyformat", False) + pyformat_c = clause.compile(dialect=pyformat_dialect, **kw) + stmt = re.sub(r"[\n\t]", "", pyformat_c.string) + + qmark_dialect = get_dialect("qmark", True) + qmark_c = clause.compile(dialect=qmark_dialect, **kw) + values = list(qmark_c.positiontup) + escaped = qmark_c.escaped_bind_names + + for post_param in ( + qmark_c.post_compile_params | qmark_c.literal_execute_params + ): + name = qmark_c.bind_names[post_param] + if name in values: + values = [v for v in values if v != name] + positions = [] + pos_by_value = 
defaultdict(list) + for v in values: + try: + if v in pos_by_value: + start = pos_by_value[v][-1] + else: + start = 0 + esc = escaped.get(v, v) + pos = stmt.index("%%(%s)s" % (esc,), start) + 2 + positions.append(pos) + pos_by_value[v].append(pos) + except ValueError: + msg = "Expected to find bindparam %r in %r" % (v, stmt) + assert False, msg + + ordered = all( + positions[i - 1] < positions[i] + for i in range(1, len(positions)) + ) + + expected = [v for _, v in sorted(zip(positions, values))] + + msg = ( + "Order of parameters %s does not match the order " + "in the statement %s. Statement %r" % (values, expected, stmt) + ) + + is_true(ordered, msg) class ComparesTables(object): diff --git a/test/dialect/oracle/test_compiler.py b/test/dialect/oracle/test_compiler.py index 8a8f51df012..2c586990813 100644 --- a/test/dialect/oracle/test_compiler.py +++ b/test/dialect/oracle/test_compiler.py @@ -1554,11 +1554,11 @@ def test_regexp_replace_flags(self): self.table.c.myid.regexp_replace( "pattern", "replacement", flags="ig" ), - "REGEXP_REPLACE(mytable.myid, :myid_1, :myid_3, :myid_2)", + "REGEXP_REPLACE(mytable.myid, :myid_1, :myid_2, :myid_3)", checkparams={ "myid_1": "pattern", - "myid_3": "replacement", - "myid_2": "ig", + "myid_2": "replacement", + "myid_3": "ig", }, ) diff --git a/test/dialect/postgresql/test_compiler.py b/test/dialect/postgresql/test_compiler.py index 0249c7952ce..e9de407c8e7 100644 --- a/test/dialect/postgresql/test_compiler.py +++ b/test/dialect/postgresql/test_compiler.py @@ -3277,11 +3277,11 @@ def test_regexp_replace_flags(self): self.table.c.myid.regexp_replace( "pattern", "replacement", flags="ig" ), - "REGEXP_REPLACE(mytable.myid, %(myid_1)s, %(myid_3)s, %(myid_2)s)", + "REGEXP_REPLACE(mytable.myid, %(myid_1)s, %(myid_2)s, %(myid_3)s)", checkparams={ "myid_1": "pattern", - "myid_3": "replacement", - "myid_2": "ig", + "myid_2": "replacement", + "myid_3": "ig", }, ) diff --git a/test/sql/test_compiler.py b/test/sql/test_compiler.py index 
831ef188720..9ede4af9237 100644 --- a/test/sql/test_compiler.py +++ b/test/sql/test_compiler.py @@ -4794,6 +4794,118 @@ def test_render_nulls_literal_binds(self, stmt, expected, warns, params): stmt, expected, literal_binds=True, params=params ) + standalone_escape = testing.combinations( + ("normalname", "normalname"), + ("_name", "_name"), + ("[BracketsAndCase]", "_BracketsAndCase_"), + ("has spaces", "has_spaces"), + argnames="paramname, expected", + ) + + @standalone_escape + @testing.variation("use_positional", [True, False]) + def test_standalone_bindparam_escape( + self, paramname, expected, use_positional + ): + stmt = select(table1.c.myid).where( + table1.c.name == bindparam(paramname, value="x") + ) + if use_positional: + self.assert_compile( + stmt, + "SELECT mytable.myid FROM mytable WHERE mytable.name = ?", + params={paramname: "y"}, + checkpositional=("y",), + dialect="sqlite", + ) + else: + self.assert_compile( + stmt, + "SELECT mytable.myid FROM mytable WHERE mytable.name = :%s" + % (expected,), + params={paramname: "y"}, + checkparams={expected: "y"}, + dialect="default", + ) + + @standalone_escape + @testing.variation("use_assert_compile", [True, False]) + @testing.variation("use_positional", [True, False]) + def test_standalone_bindparam_escape_expanding( + self, paramname, expected, use_assert_compile, use_positional + ): + stmt = select(table1.c.myid).where( + table1.c.name.in_(bindparam(paramname, value=["a", "b"])) + ) + if use_assert_compile: + if use_positional: + self.assert_compile( + stmt, + "SELECT mytable.myid FROM mytable " + "WHERE mytable.name IN (?, ?)", + params={paramname: ["y", "z"]}, + # NOTE: this is what render_postcompile will do right now + # if you run construct_params(). render_postcompile mode + # is not actually used by the execution internals, it's for + # user-facing compilation code. 
So this is likely a + # current limitation of construct_params() which is not + # doing the full blown postcompile; just assert that's + # what it does for now. it likely should be corrected + # to make more sense. + checkpositional=(["y", "z"], ["y", "z"]), + dialect="sqlite", + render_postcompile=True, + ) + else: + self.assert_compile( + stmt, + "SELECT mytable.myid FROM mytable WHERE mytable.name IN " + "(:%s_1, :%s_2)" % (expected, expected), + params={paramname: ["y", "z"]}, + # NOTE: this is what render_postcompile will do right now + # if you run construct_params(). render_postcompile mode + # is not actually used by the execution internals, it's for + # user-facing compilation code. So this is likely a + # current limitation of construct_params() which is not + # doing the full blown postcompile; just assert that's + # what it does for now. it likely should be corrected + # to make more sense. + checkparams={ + "%s_1" % expected: ["y", "z"], + "%s_2" % expected: ["y", "z"], + }, + dialect="default", + render_postcompile=True, + ) + else: + # this is what DefaultDialect actually does. + # this should be matched to DefaultDialect._init_compiled() + if use_positional: + compiled = stmt.compile( + dialect=default.DefaultDialect(paramstyle="qmark") + ) + else: + compiled = stmt.compile(dialect=default.DefaultDialect()) + checkparams = compiled.construct_params( + {paramname: ["y", "z"]}, escape_names=False + ) + # nothing actually happened. 
if the compiler had + # render_postcompile set, the + # above weird param thing happens + eq_(checkparams, {paramname: ["y", "z"]}) + expanded_state = compiled._process_parameters_for_postcompile( + checkparams + ) + eq_( + expanded_state.additional_parameters, + {"%s_1" % (expected,): "y", "%s_2" % (expected,): "z"}, + ) + if use_positional: + eq_( + expanded_state.positiontup, + ["%s_1" % (expected,), "%s_2" % (expected,)], + ) + class UnsupportedTest(fixtures.TestBase): def test_unsupported_element_str_visit_name(self): diff --git a/test/sql/test_cte.py b/test/sql/test_cte.py index fed371f6294..40f92e41d01 100644 --- a/test/sql/test_cte.py +++ b/test/sql/test_cte.py @@ -2095,7 +2095,8 @@ def test_compound_select_with_nesting_cte_in_cte(self): ") SELECT cte.outer_cte FROM cte", ) - def test_nesting_cte_in_recursive_cte(self): + @testing.fixture + def nesting_cte_in_recursive_cte(self): nesting_cte = select(literal(1).label("inner_cte")).cte( "nesting", nesting=True ) @@ -2104,20 +2105,85 @@ def test_nesting_cte_in_recursive_cte(self): "rec_cte", recursive=True ) rec_part = select(rec_cte.c.outer_cte).where( - rec_cte.c.outer_cte == literal(1) + rec_cte.c.outer_cte == literal(42) ) rec_cte = rec_cte.union(rec_part) stmt = select(rec_cte) + return stmt + + def test_nesting_cte_in_recursive_cte_positional( + self, nesting_cte_in_recursive_cte + ): self.assert_compile( - stmt, + nesting_cte_in_recursive_cte, + "WITH RECURSIVE rec_cte(outer_cte) AS (WITH nesting AS " + "(SELECT ? AS inner_cte) " + "SELECT nesting.inner_cte AS outer_cte FROM nesting UNION " + "SELECT rec_cte.outer_cte AS outer_cte FROM rec_cte " + "WHERE rec_cte.outer_cte = ?) 
" + "SELECT rec_cte.outer_cte FROM rec_cte", + checkpositional=(1, 42), + dialect="default_qmark", + ) + + def test_nesting_cte_in_recursive_cte(self, nesting_cte_in_recursive_cte): + self.assert_compile( + nesting_cte_in_recursive_cte, + "WITH RECURSIVE rec_cte(outer_cte) AS (WITH nesting AS " + "(SELECT :param_1 AS inner_cte) " + "SELECT nesting.inner_cte AS outer_cte FROM nesting UNION " + "SELECT rec_cte.outer_cte AS outer_cte FROM rec_cte " + "WHERE rec_cte.outer_cte = :param_2) " + "SELECT rec_cte.outer_cte FROM rec_cte", + checkparams={"param_1": 1, "param_2": 42}, + ) + + @testing.fixture + def nesting_cte_in_recursive_cte_w_add_cte(self): + nesting_cte = select(literal(1).label("inner_cte")).cte( + "nesting", nesting=True + ) + + rec_cte = select(nesting_cte.c.inner_cte.label("outer_cte")).cte( + "rec_cte", recursive=True + ) + rec_part = select(rec_cte.c.outer_cte).where( + rec_cte.c.outer_cte == literal(42) + ) + rec_cte = rec_cte.union(rec_part) + + stmt = select(rec_cte) + return stmt + + def test_nesting_cte_in_recursive_cte_w_add_cte_positional( + self, nesting_cte_in_recursive_cte_w_add_cte + ): + self.assert_compile( + nesting_cte_in_recursive_cte_w_add_cte, + "WITH RECURSIVE rec_cte(outer_cte) AS (WITH nesting AS " + "(SELECT ? AS inner_cte) " + "SELECT nesting.inner_cte AS outer_cte FROM nesting UNION " + "SELECT rec_cte.outer_cte AS outer_cte FROM rec_cte " + "WHERE rec_cte.outer_cte = ?) 
" + "SELECT rec_cte.outer_cte FROM rec_cte", + checkpositional=(1, 42), + dialect="default_qmark", + ) + + def test_nesting_cte_in_recursive_cte_w_add_cte( + self, nesting_cte_in_recursive_cte_w_add_cte + ): + self.assert_compile( + nesting_cte_in_recursive_cte_w_add_cte, "WITH RECURSIVE rec_cte(outer_cte) AS (WITH nesting AS " "(SELECT :param_1 AS inner_cte) " "SELECT nesting.inner_cte AS outer_cte FROM nesting UNION " "SELECT rec_cte.outer_cte AS outer_cte FROM rec_cte " "WHERE rec_cte.outer_cte = :param_2) " "SELECT rec_cte.outer_cte FROM rec_cte", + checkparams={"param_1": 1, "param_2": 42}, ) def test_recursive_nesting_cte_in_cte(self): @@ -2219,18 +2285,19 @@ def test_aliased_recursive_nesting_cte_in_cte(self): "SELECT cte.outer_cte FROM cte", ) - def test_same_nested_cte_is_not_generated_twice(self): + @testing.fixture + def same_nested_cte_is_not_generated_twice(self): # Same = name and query nesting_cte_used_twice = select(literal(1).label("inner_cte_1")).cte( "nesting_cte", nesting=True ) select_add_cte = select( - (nesting_cte_used_twice.c.inner_cte_1 + 1).label("next_value") + (nesting_cte_used_twice.c.inner_cte_1 + 2).label("next_value") ).cte("nesting_2", nesting=True) union_cte = ( select( - (nesting_cte_used_twice.c.inner_cte_1 - 1).label("next_value") + (nesting_cte_used_twice.c.inner_cte_1 - 3).label("next_value") ) .union(select(select_add_cte)) .cte("wrapper", nesting=True) @@ -2241,9 +2308,36 @@ def test_same_nested_cte_is_not_generated_twice(self): .add_cte(nesting_cte_used_twice) .union(select(nesting_cte_used_twice)) ) + return stmt + def test_same_nested_cte_is_not_generated_twice_positional( + self, same_nested_cte_is_not_generated_twice + ): self.assert_compile( - stmt, + same_nested_cte_is_not_generated_twice, + "WITH nesting_cte AS " + "(SELECT ? AS inner_cte_1)" + ", wrapper AS " + "(WITH nesting_2 AS " + "(SELECT nesting_cte.inner_cte_1 + ? " + "AS next_value " + "FROM nesting_cte)" + " SELECT nesting_cte.inner_cte_1 - ? 
" + "AS next_value " + "FROM nesting_cte UNION SELECT nesting_2.next_value " + "AS next_value FROM nesting_2)" + " SELECT wrapper.next_value " + "FROM wrapper UNION SELECT nesting_cte.inner_cte_1 " + "FROM nesting_cte", + checkpositional=(1, 2, 3), + dialect="default_qmark", + ) + + def test_same_nested_cte_is_not_generated_twice( + self, same_nested_cte_is_not_generated_twice + ): + self.assert_compile( + same_nested_cte_is_not_generated_twice, "WITH nesting_cte AS " "(SELECT :param_1 AS inner_cte_1)" ", wrapper AS " @@ -2253,19 +2347,25 @@ def test_same_nested_cte_is_not_generated_twice(self): "FROM nesting_cte)" " SELECT nesting_cte.inner_cte_1 - :inner_cte_1_1 " "AS next_value " - "FROM nesting_cte UNION SELECT nesting_2.next_value AS next_value " - "FROM nesting_2)" + "FROM nesting_cte UNION SELECT nesting_2.next_value " + "AS next_value FROM nesting_2)" " SELECT wrapper.next_value " "FROM wrapper UNION SELECT nesting_cte.inner_cte_1 " "FROM nesting_cte", + checkparams={ + "param_1": 1, + "inner_cte_1_2": 2, + "inner_cte_1_1": 3, + }, ) - def test_recursive_nesting_cte_in_recursive_cte(self): + @testing.fixture + def recursive_nesting_cte_in_recursive_cte(self): nesting_cte = select(literal(1).label("inner_cte")).cte( "nesting", nesting=True, recursive=True ) nesting_rec_part = select(nesting_cte.c.inner_cte).where( - nesting_cte.c.inner_cte == literal(1) + nesting_cte.c.inner_cte == literal(2) ) nesting_cte = nesting_cte.union(nesting_rec_part) @@ -2273,14 +2373,37 @@ def test_recursive_nesting_cte_in_recursive_cte(self): "rec_cte", recursive=True ) rec_part = select(rec_cte.c.outer_cte).where( - rec_cte.c.outer_cte == literal(1) + rec_cte.c.outer_cte == literal(3) ) rec_cte = rec_cte.union(rec_part) stmt = select(rec_cte) + return stmt + + def test_recursive_nesting_cte_in_recursive_cte_positional( + self, recursive_nesting_cte_in_recursive_cte + ): self.assert_compile( - stmt, + recursive_nesting_cte_in_recursive_cte, + "WITH RECURSIVE rec_cte(outer_cte) AS 
(" + "WITH RECURSIVE nesting(inner_cte) AS " + "(SELECT ? AS inner_cte UNION " + "SELECT nesting.inner_cte AS inner_cte FROM nesting " + "WHERE nesting.inner_cte = ?) " + "SELECT nesting.inner_cte AS outer_cte FROM nesting UNION " + "SELECT rec_cte.outer_cte AS outer_cte FROM rec_cte " + "WHERE rec_cte.outer_cte = ?) " + "SELECT rec_cte.outer_cte FROM rec_cte", + checkpositional=(1, 2, 3), + dialect="default_qmark", + ) + + def test_recursive_nesting_cte_in_recursive_cte( + self, recursive_nesting_cte_in_recursive_cte + ): + self.assert_compile( + recursive_nesting_cte_in_recursive_cte, "WITH RECURSIVE rec_cte(outer_cte) AS (" "WITH RECURSIVE nesting(inner_cte) AS " "(SELECT :param_1 AS inner_cte UNION " @@ -2290,6 +2413,7 @@ def test_recursive_nesting_cte_in_recursive_cte(self): "SELECT rec_cte.outer_cte AS outer_cte FROM rec_cte " "WHERE rec_cte.outer_cte = :param_3) " "SELECT rec_cte.outer_cte FROM rec_cte", + checkparams={"param_1": 1, "param_2": 2, "param_3": 3}, ) def test_select_from_insert_cte_with_nesting(self): @@ -2418,7 +2542,43 @@ def test_compound_select_with_nesting_cte_in_custom_order(self): ") SELECT cte.outer_cte FROM cte", ) - def test_recursive_cte_referenced_multiple_times_with_nesting_cte(self): + @testing.fixture + def cte_in_compound_select(self): + upper = select(literal(1).label("z")) + + lower_a_cte = select(literal(2).label("x")).cte("xx", nesting=True) + lower_a = select(literal(3).label("y")).add_cte(lower_a_cte) + lower_b = select(literal(4).label("w")) + + stmt = upper.union_all(lower_a.union_all(lower_b)) + return stmt + + def test_cte_in_compound_select_positional(self, cte_in_compound_select): + self.assert_compile( + cte_in_compound_select, + "SELECT ? AS z UNION ALL (WITH xx AS " + "(SELECT ? AS x) " + "SELECT ? AS y UNION ALL SELECT ? 
AS w)", + checkpositional=(1, 2, 3, 4), + dialect="default_qmark", + ) + + def test_cte_in_compound_select(self, cte_in_compound_select): + self.assert_compile( + cte_in_compound_select, + "SELECT :param_1 AS z UNION ALL (WITH xx AS " + "(SELECT :param_2 AS x) " + "SELECT :param_3 AS y UNION ALL SELECT :param_4 AS w)", + checkparams={ + "param_1": 1, + "param_2": 2, + "param_3": 3, + "param_4": 4, + }, + ) + + @testing.fixture + def recursive_cte_referenced_multiple_times_with_nesting_cte(self): rec_root = select(literal(1).label("the_value")).cte( "recursive_cte", recursive=True ) @@ -2431,7 +2591,7 @@ def test_recursive_cte_referenced_multiple_times_with_nesting_cte(self): exists( select(rec_root_ref.c.the_value) .where(rec_root_ref.c.the_value < 10) - .limit(1) + .limit(5) ).label("val") ).cte("should_continue", nesting=True) @@ -2447,13 +2607,43 @@ def test_recursive_cte_referenced_multiple_times_with_nesting_cte(self): rec_cte = rec_root.union_all(rec_part) stmt = rec_cte.select() + return stmt + def test_recursive_cte_referenced_multiple_times_with_nesting_cte_pos( + self, recursive_cte_referenced_multiple_times_with_nesting_cte + ): self.assert_compile( - stmt, + recursive_cte_referenced_multiple_times_with_nesting_cte, + "WITH RECURSIVE recursive_cte(the_value) AS (" + "SELECT ? AS the_value UNION ALL (" + "WITH allow_multiple_ref AS (" + "SELECT recursive_cte.the_value AS the_value " + "FROM recursive_cte)" + ", should_continue AS (SELECT EXISTS (" + "SELECT allow_multiple_ref.the_value FROM allow_multiple_ref" + " WHERE allow_multiple_ref.the_value < ?" + " LIMIT ?) AS val) " + "SELECT allow_multiple_ref.the_value * ? AS anon_1" + " FROM allow_multiple_ref, should_continue " + "WHERE should_continue.val != 1" + " UNION ALL SELECT allow_multiple_ref.the_value * ?" 
+ " AS anon_2 FROM allow_multiple_ref, should_continue" + " WHERE should_continue.val != 1))" + " SELECT recursive_cte.the_value FROM recursive_cte", + checkpositional=(1, 10, 5, 2, 3), + dialect="default_qmark", + ) + + def test_recursive_cte_referenced_multiple_times_with_nesting_cte( + self, recursive_cte_referenced_multiple_times_with_nesting_cte + ): + self.assert_compile( + recursive_cte_referenced_multiple_times_with_nesting_cte, "WITH RECURSIVE recursive_cte(the_value) AS (" "SELECT :param_1 AS the_value UNION ALL (" "WITH allow_multiple_ref AS (" - "SELECT recursive_cte.the_value AS the_value FROM recursive_cte)" + "SELECT recursive_cte.the_value AS the_value " + "FROM recursive_cte)" ", should_continue AS (SELECT EXISTS (" "SELECT allow_multiple_ref.the_value FROM allow_multiple_ref" " WHERE allow_multiple_ref.the_value < :the_value_2" @@ -2465,4 +2655,11 @@ def test_recursive_cte_referenced_multiple_times_with_nesting_cte(self): " AS anon_2 FROM allow_multiple_ref, should_continue" " WHERE should_continue.val != true))" " SELECT recursive_cte.the_value FROM recursive_cte", + checkparams={ + "param_1": 1, + "param_2": 5, + "the_value_2": 10, + "the_value_1": 2, + "the_value_3": 3, + }, ) diff --git a/test/sql/test_external_traversal.py b/test/sql/test_external_traversal.py index 1695771486a..37363273b20 100644 --- a/test/sql/test_external_traversal.py +++ b/test/sql/test_external_traversal.py @@ -193,7 +193,8 @@ def visit_grouping(self, elem): ("name with~~tildes~~",), argnames="name", ) - def test_bindparam_key_proc_for_copies(self, meth, name): + @testing.combinations(True, False, argnames="positional") + def test_bindparam_key_proc_for_copies(self, meth, name, positional): r"""test :ticket:`6249`. Revised for :ticket:`8056`. 
@@ -225,13 +226,25 @@ def test_bindparam_key_proc_for_copies(self, meth, name): token = re.sub(r"[%\(\) \$\[\]]", "_", name) - self.assert_compile( - expr, - '"%(name)s" IN (:%(token)s_1_1, ' - ":%(token)s_1_2, :%(token)s_1_3)" % {"name": name, "token": token}, - render_postcompile=True, - dialect="default", - ) + if positional: + self.assert_compile( + expr, + '"%(name)s" IN (?, ?, ?)' % {"name": name}, + checkpositional=(1, 2, 3), + render_postcompile=True, + dialect="default_qmark", + ) + else: + tokens = ["%s_1_%s" % (token, i) for i in range(1, 4)] + self.assert_compile( + expr, + '"%(name)s" IN (:%(token)s_1_1, ' + ":%(token)s_1_2, :%(token)s_1_3)" + % {"name": name, "token": token}, + checkparams=dict(zip(tokens, [1, 2, 3])), + render_postcompile=True, + dialect="default", + ) def test_expanding_in_bindparam_safe_to_clone(self): expr = column("x").in_([1, 2, 3]) diff --git a/test/sql/test_functions.py b/test/sql/test_functions.py index 3a9a06728cb..908fd9faaf0 100644 --- a/test/sql/test_functions.py +++ b/test/sql/test_functions.py @@ -777,6 +777,22 @@ def test_funcfilter_windowing_range(self): "OVER (PARTITION BY mytable.description RANGE BETWEEN :param_1 " "FOLLOWING AND :param_2 FOLLOWING) " "AS anon_1 FROM mytable", + checkparams={"name_1": "foo", "param_1": 1, "param_2": 5}, + ) + + def test_funcfilter_windowing_range_positional(self): + self.assert_compile( + select( + func.rank() + .filter(table1.c.name > "foo") + .over(range_=(1, 5), partition_by=["description"]) + ), + "SELECT rank() FILTER (WHERE mytable.name > ?) " + "OVER (PARTITION BY mytable.description RANGE BETWEEN ? " + "FOLLOWING AND ? FOLLOWING) " + "AS anon_1 FROM mytable", + checkpositional=("foo", 1, 5), + dialect="default_qmark", ) def test_funcfilter_windowing_rows(self): From 3f1e6303f0f53cd6239b2a6227c8cd55789a175f Mon Sep 17 00:00:00 2001 From: jonathan vanasco Date: Fri, 2 Dec 2022 12:37:30 -0500 Subject: [PATCH 457/632] Returned Github Actions support for py27 and py36. 
GitHub recently upgraded the `ubuntu-latest` label from `ubuntu-20.04` to `ubuntu-22.04`. The `ubuntu-22.04` image removed support for py27 and py36. To return support, the affected jobs have been duplicated to `-legacy` versions. The -legacy versions of jobs run py27 and py36 on a pinned `ubuntu-20.04` os. The existing jobs continue to run py37+ on ubuntu-latest, as that platform may continue to benefit from patch releases on the python versions. Change-Id: I0f063723cb993fab89bc64c89df6dfcaf4dbe5a5 --- .github/workflows/create-wheels.yaml | 255 ++++++++++++++++++++++++++- .github/workflows/run-on-pr.yaml | 42 ++++- .github/workflows/run-test.yaml | 150 +++++++++++++++- 3 files changed, 436 insertions(+), 11 deletions(-) diff --git a/.github/workflows/create-wheels.yaml b/.github/workflows/create-wheels.yaml index 8353c0ec94b..cf10ab88aa3 100644 --- a/.github/workflows/create-wheels.yaml +++ b/.github/workflows/create-wheels.yaml @@ -123,6 +123,7 @@ jobs: twine upload --skip-existing dist/* make-wheel-linux: + # any changes should be duplicated in `make-wheel-linux-legacy` name: ${{ matrix.python-version }}-${{ matrix.architecture }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: @@ -131,9 +132,6 @@ jobs: - "ubuntu-latest" python-version: # the versions are - as specified in PEP 425. - - cp27-cp27m - - cp27-cp27mu - - cp36-cp36m - cp37-cp37m - cp38-cp38 - cp39-cp39 @@ -280,7 +278,164 @@ jobs: pip install -U twine twine upload --skip-existing dist/*manylinux* + make-wheel-linux-legacy: + # this is identical to `make-wheel-linux`, but pins ubuntu to 20.04 + # ubuntu-20.04 is necessary to run: py27, py36 + name: ${{ matrix.python-version }}-${{ matrix.architecture }}-${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: + - "ubuntu-20.04" + python-version: + # the versions are - as specified in PEP 425. 
+ - cp27-cp27m + - cp27-cp27mu + - cp36-cp36m + architecture: + - x64 + + include: + - python-version: "cp27-cp27m" + extra-requires: "mock" + - python-version: "cp27-cp27mu" + extra-requires: "mock" + + fail-fast: false + + steps: + - name: Checkout repo + uses: actions/checkout@v3 + + - name: Get python version + id: linux-py-version + env: + py_tag: ${{ matrix.python-version }} + # the command `echo "::set-output ...` is used to create an step output that can be used in following steps + # this is from https://github.community/t5/GitHub-Actions/Using-the-output-of-run-inside-of-if-condition/td-p/33920 + run: | + version="`echo $py_tag | sed --regexp-extended 's/cp([0-9])([0-9]+)-.*/\1.\2/g'`" + version=$([[ $version = "3.11" ]] && echo 3.11.0-rc - 3.11 || echo $version ) + echo $version + echo "::set-output name=python-version::$version" + + - name: Remove tag_build from setup.cfg + # sqlalchemy has `tag_build` set to `dev` in setup.cfg. We need to remove it before creating the weel + # otherwise it gets tagged with `dev0` + shell: pwsh + # This is equivalent to the sed commands: + # `sed -i '/tag_build=dev/d' setup.cfg` + # `sed -i '/tag_build = dev/d' setup.cfg` + + # `-replace` uses a regexp match + # alternative form: `(get-content setup.cfg) | foreach-object{$_ -replace "tag_build.=.dev",""} | set-content setup.cfg` + run: | + (cat setup.cfg) | %{$_ -replace "tag_build.?=.?dev",""} | set-content setup.cfg + + - name: Create wheel for manylinux1 and manylinux2010 for py3 + if: ${{ matrix.python-version != 'cp27-cp27m' && matrix.python-version != 'cp27-cp27mu' && matrix.python-version != 'cp311-cp311' }} + # this step uses the image provided by pypa here https://github.com/pypa/manylinux to generate the wheels on linux + # the action uses the image for manylinux2010 but can generate also a manylinux1 wheel + # change the tag of this image to change the image used + uses: RalfG/python-wheels-manylinux-build@v0.5.0-manylinux2010_x86_64 + # this action generates 
3 wheels in dist/. linux, manylinux1 and manylinux2010 + with: + # python-versions is the output of the previous step and is in the form -. Eg cp27-cp27mu + python-versions: ${{ matrix.python-version }} + build-requirements: "setuptools>=44 wheel>=0.34" + # Create the wheel using --no-use-pep517 since locally we have pyproject + # This flag should be removed once sqlalchemy supports pep517 + # `--no-deps` is used to only generate the wheel for the current library. Redundant in sqlalchemy since it has no dependencies + pip-wheel-args: "-w ./dist --no-use-pep517 -v --no-deps" + + - name: Create wheel for manylinux2014 for py3 + if: ${{ matrix.python-version != 'cp27-cp27m' && matrix.python-version != 'cp27-cp27mu' }} + # this step uses the image provided by pypa here https://github.com/pypa/manylinux to generate the wheels on linux + # the action uses the image for manylinux2010 but can generate also a manylinux1 wheel + # change the tag of this image to change the image used + uses: RalfG/python-wheels-manylinux-build@v0.5.0-manylinux2014_x86_64 + # this action generates 2 wheels in dist/. linux and manylinux2014 + with: + # python-versions is the output of the previous step and is in the form -. Eg cp27-cp27mu + python-versions: ${{ matrix.python-version }} + build-requirements: "setuptools>=44 wheel>=0.34" + # Create the wheel using --no-use-pep517 since locally we have pyproject + # This flag should be removed once sqlalchemy supports pep517 + # `--no-deps` is used to only generate the wheel for the current library. 
Redundant in sqlalchemy since it has no dependencies + pip-wheel-args: "-w ./dist --no-use-pep517 -v --no-deps" + + - name: Create wheel for manylinux py2 + if: ${{ matrix.python-version == 'cp27-cp27m' || matrix.python-version == 'cp27-cp27mu' }} + # this step uses the image provided by pypa here https://github.com/pypa/manylinux to generate the wheels on linux + # the action uses the image for manylinux2010 but can generate also a manylinux1 wheel + # change the tag of this image to change the image used + uses: RalfG/python-wheels-manylinux-build@v0.5.0-manylinux1_x86_64 + # this action generates 2 wheels in dist/. linux and manylinux1 + with: + # python-versions is the output of the previous step and is in the form -. Eg cp27-cp27mu + python-versions: ${{ matrix.python-version }} + build-requirements: "setuptools>=44 wheel>=0.34" + # Create the wheel using --no-use-pep517 since locally we have pyproject + # This flag should be removed once sqlalchemy supports pep517 + # `--no-deps` is used to only generate the wheel for the current library. Redundant in sqlalchemy since it has no dependencies + pip-wheel-args: "-w ./dist --no-use-pep517 -v --no-deps" + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: ${{ steps.linux-py-version.outputs.python-version }} + architecture: ${{ matrix.architecture }} + + - name: Check created wheel + # check that the wheel is compatible with the current installation. 
+ # If it is then does: + # - install the created wheel without using the pypi index + # - check the c extension + # - runs the tests + run: | + pip install 'packaging>=20.4' + if python .github/workflows/scripts/can_install.py "${{ matrix.python-version }}" + then + pip install greenlet "importlib-metadata;python_version<'3.8'" + pip install -f dist --no-index sqlalchemy + python -c 'from sqlalchemy.util import has_compiled_ext; assert has_compiled_ext()' + pip install pytest pytest-xdist ${{ matrix.extra-requires }} + pytest -n2 -q test --nomemory --notimingintensive + else + echo Not compatible. Skipping install. + fi + + - name: Upload wheels to release + # upload the generated wheels to the github release + uses: sqlalchemyorg/upload-release-assets@sa + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + files: 'dist/*manylinux*' + + - name: Set up Python for twine + # twine on py2 is very old and is no longer updated, so we change to python 3.8 before upload + uses: actions/setup-python@v4 + with: + python-version: "3.8" + + - name: Publish wheel + # the action https://github.com/marketplace/actions/pypi-publish runs only on linux and we cannot specify + # additional options + # We upload both manylinux1 and manylinux2010 wheels. pip will download the appropriate one according to the system. + # manylinux1 is an older format and is now not very used since many environments can use manylinux2010 + # currently (April 2020) manylinux2014 is still wip, so we do not generate it. 
+ env: + TWINE_USERNAME: __token__ + # replace TWINE_PASSWORD with token for real pypi + # TWINE_PASSWORD: ${{ secrets.test_pypi_token }} + TWINE_PASSWORD: ${{ secrets.pypi_token }} + run: | + pip install -U twine + twine upload --skip-existing dist/*manylinux* + + make-wheel-linux-arm64: + # any changes should be duplicated in `make-wheel-linux-arm64-legacy` name: ${{ matrix.python-version }}-arm64-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: @@ -289,7 +444,6 @@ jobs: - "ubuntu-latest" python-version: # the versions are - as specified in PEP 425. - - cp36-cp36m - cp37-cp37m - cp38-cp38 - cp39-cp39 @@ -375,3 +529,96 @@ jobs: run: | pip install -U twine twine upload --skip-existing dist/*manylinux* + + make-wheel-linux-arm64-legacy: + # this is identical to `make-wheel-linux-arm64`, but pins ubuntu to 20.04 + # ubuntu-20.04 is necessary to run: py27, py36 + name: ${{ matrix.python-version }}-arm64-${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: + - "ubuntu-20.04" + python-version: + # the versions are - as specified in PEP 425. + - cp36-cp36m + + fail-fast: false + + steps: + - name: Checkout repo + uses: actions/checkout@v3 + + - name: Remove tag_build from setup.cfg + # sqlalchemy has `tag_build` set to `dev` in setup.cfg. 
We need to remove it before creating the weel + # otherwise it gets tagged with `dev0` + shell: pwsh + # This is equivalent to the sed commands: + # `sed -i '/tag_build=dev/d' setup.cfg` + # `sed -i '/tag_build = dev/d' setup.cfg` + + # `-replace` uses a regexp match + # alternative form: `(get-content setup.cfg) | foreach-object{$_ -replace "tag_build.=.dev",""} | set-content setup.cfg` + run: | + (cat setup.cfg) | %{$_ -replace "tag_build.?=.?dev",""} | set-content setup.cfg + + - name: Set up emulation + run: | + docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + + - name: Create wheel for manylinux2014 + # this step uses the image provided by pypa here https://github.com/pypa/manylinux to generate the wheels on linux + # the action uses the image for manylinux2014 but can generate also a manylinux1 wheel + # change the tag of this image to change the image used + uses: RalfG/python-wheels-manylinux-build@v0.5.0-manylinux2014_aarch64 + # this action generates 2 wheels in dist/. linux and manylinux2014 + with: + # python-versions is the output of the previous step and is in the form -. Eg cp37-cp37mu + python-versions: ${{ matrix.python-version }} + build-requirements: "setuptools>=44 wheel>=0.34" + # Create the wheel using --no-use-pep517 since locally we have pyproject + # This flag should be removed once sqlalchemy supports pep517 + # `--no-deps` is used to only generate the wheel for the current library. Redundant in sqlalchemy since it has no dependencies + pip-wheel-args: "-w ./dist --no-use-pep517 -v --no-deps" + + - name: Check created wheel + # check that the wheel is compatible with the current installation. 
+ # - runs the tests + uses: docker://quay.io/pypa/manylinux2014_aarch64 + with: + args: | + bash -c " + export PATH=/opt/python/${{ matrix.python-version }}/bin:$PATH && + python --version && + pip install greenlet \"importlib-metadata;python_version<'3.8'\" && + pip install -f dist --no-index sqlalchemy && + python -c 'from sqlalchemy.util import has_compiled_ext; assert has_compiled_ext()' && + pip install pytest pytest-xdist ${{ matrix.extra-requires }} && + pytest -n2 -q test --nomemory --notimingintensive" + + - name: Upload wheels to release + # upload the generated wheels to the github release + uses: sqlalchemyorg/upload-release-assets@sa + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + files: 'dist/*manylinux*' + + - name: Set up Python for twine + # Setup python after creating the wheel, otherwise LD_LIBRARY_PATH gets set and it will break wheel generation + # twine on py2 is very old and is no longer updated, so we change to python 3.8 before upload + uses: actions/setup-python@v4 + with: + python-version: "3.8" + + - name: Publish wheel + # the action https://github.com/marketplace/actions/pypi-publish runs only on linux and we cannot specify + # additional options + # We upload manylinux2014 arm64 wheels. pip will download the appropriate one according to the system. 
+ env: + TWINE_USERNAME: __token__ + # replace TWINE_PASSWORD with token for real pypi + # TWINE_PASSWORD: ${{ secrets.test_pypi_token }} + TWINE_PASSWORD: ${{ secrets.pypi_token }} + run: | + pip install -U twine + twine upload --skip-existing dist/*manylinux* \ No newline at end of file diff --git a/.github/workflows/run-on-pr.yaml b/.github/workflows/run-on-pr.yaml index 087f1bc3320..2f11b7dad86 100644 --- a/.github/workflows/run-on-pr.yaml +++ b/.github/workflows/run-on-pr.yaml @@ -17,6 +17,7 @@ permissions: jobs: run-test-amd64: + # any changes to this job should be duplicated in `run-mypy-legacy` name: ${{ matrix.python-version }}-${{ matrix.build-type }}-${{ matrix.architecture }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: @@ -25,7 +26,6 @@ jobs: os: - "ubuntu-latest" python-version: - - "2.7" - "3.10" build-type: - "cext" @@ -55,6 +55,46 @@ jobs: - name: Run tests run: tox -e github-${{ matrix.build-type }} -- -q --nomemory --notimingintensive ${{ matrix.pytest-args }} + run-test-amd64-legacy: + # this is identical to `run-test-amd64`, but pins ubuntu to 20.04 + # ubuntu-20.04 is necessary to run: py27, py36 + name: ${{ matrix.python-version }}-${{ matrix.build-type }}-${{ matrix.architecture }}-${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + # run this job using this matrix, excluding some combinations below. + matrix: + os: + - "ubuntu-20.04" + python-version: + - "2.7" + build-type: + - "cext" + - "nocext" + architecture: + - x64 + # abort all jobs as soon as one fails + fail-fast: true + + # steps to run in each job. 
Some are github actions, others run shell commands + steps: + - name: Checkout repo + uses: actions/checkout@v3 + + - name: Set up python + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + architecture: ${{ matrix.architecture }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install --upgrade tox setuptools + pip list + + - name: Run tests + run: tox -e github-${{ matrix.build-type }} -- -q --nomemory --notimingintensive ${{ matrix.pytest-args }} + run-mypy: name: mypy-${{ matrix.python-version }} runs-on: ${{ matrix.os }} diff --git a/.github/workflows/run-test.yaml b/.github/workflows/run-test.yaml index 1c97f64bc1d..eb7aafae9f6 100644 --- a/.github/workflows/run-test.yaml +++ b/.github/workflows/run-test.yaml @@ -21,6 +21,7 @@ permissions: jobs: run-test: + # any changes to this job should be duplicated in `run-test-legacy` name: ${{ matrix.python-version }}-${{ matrix.build-type }}-${{ matrix.architecture }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: @@ -31,8 +32,6 @@ jobs: - "windows-latest" - "macos-latest" python-version: - - "2.7" - - "3.6" - "3.7" - "3.8" - "3.9" @@ -53,7 +52,76 @@ jobs: # - python-version: "pypy-3.9" # pytest-args: "-k 'not test_autocommit_on and not test_turn_autocommit_off_via_default_iso_level and not test_autocommit_isolation_level'" # add aiosqlite on linux - - os: "ubuntu-latest" + - os: "ubuntu-20.04" + pytest-args: "--dbdriver pysqlite --dbdriver aiosqlite" + + exclude: + # c-extensions fail to build on windows for python 2.7 + - os: "windows-latest" + python-version: "2.7" + build-type: "cext" + # linux and osx do not have x86 python + - os: "ubuntu-20.04" + architecture: x86 + - os: "macos-latest" + architecture: x86 + # pypy does not have cext + # - python-version: "pypy-3.9" + # build-type: "cext" + # - os: "windows-latest" + # python-version: "pypy-3.9" + # architecture: x86 + + fail-fast: false + + # steps to run in each job. 
Some are github actions, others run shell commands + steps: + - name: Checkout repo + uses: actions/checkout@v3 + + - name: Set up python + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + architecture: ${{ matrix.architecture }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install --upgrade tox setuptools + pip list + + - name: Run tests + run: tox -e github-${{ matrix.build-type }} -- -q --nomemory --notimingintensive ${{ matrix.pytest-args }} + + run-test-legacy: + # this is identical to `run-test`, but pins ubuntu to 20.04 + # ubuntu-20.04 is necessary to run: py27, py36 + name: ${{ matrix.python-version }}-${{ matrix.build-type }}-${{ matrix.architecture }}-${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + # run this job using this matrix, excluding some combinations below. + matrix: + os: + - "ubuntu-20.04" + - "windows-latest" + - "macos-latest" + python-version: + - "2.7" + - "3.6" + build-type: + - "cext" + - "nocext" + architecture: + - x64 + - x86 + + include: + # autocommit tests fail on the ci for some reason + # - python-version: "pypy-3.9" + # pytest-args: "-k 'not test_autocommit_on and not test_turn_autocommit_off_via_default_iso_level and not test_autocommit_isolation_level'" + # add aiosqlite on linux + - os: "ubuntu-20.04" pytest-args: "--dbdriver pysqlite --dbdriver aiosqlite" exclude: @@ -62,7 +130,7 @@ jobs: python-version: "2.7" build-type: "cext" # linux and osx do not have x86 python - - os: "ubuntu-latest" + - os: "ubuntu-20.04" architecture: x86 - os: "macos-latest" architecture: x86 @@ -96,12 +164,12 @@ jobs: run: tox -e github-${{ matrix.build-type }} -- -q --nomemory --notimingintensive ${{ matrix.pytest-args }} run-test-arm64: + # any changes to this job should be duplicated in `run-test-arm64` name: ${{ matrix.python-version }}-${{ matrix.build-type }}-arm64-ubuntu-latest runs-on: ubuntu-latest strategy: matrix: python-version: - - cp36-cp36m - 
cp37-cp37m - cp38-cp38 - cp39-cp39 @@ -134,7 +202,44 @@ jobs: tox -e github-${{ matrix.build-type }} -- -q --nomemory --notimingintensive ${{ matrix.pytest-args }} " + run-test-arm64-legacy: + # this is identical to `run-test-arm64-legacy`, but pins ubuntu to 20.04 + # ubuntu-20.04 is necessary to run: py27, py36 + name: ${{ matrix.python-version }}-${{ matrix.build-type }}-arm64-ubuntu-20.04 + runs-on: ubuntu-20.04 + strategy: + matrix: + python-version: + - cp36-cp36m + build-type: + - "cext" + - "nocext" + + fail-fast: false + + steps: + - name: Checkout repo + uses: actions/checkout@v3 + + - name: Set up emulation + run: | + docker run --rm --privileged multiarch/qemu-user-static --reset -p yes + + - name: Run tests + uses: docker://quay.io/pypa/manylinux2014_aarch64 + with: + args: | + bash -c " + export PATH=/opt/python/${{ matrix.python-version }}/bin:$PATH && + python --version && + python -m pip install --upgrade pip && + pip install --upgrade tox setuptools && + pip list && + tox -e github-${{ matrix.build-type }} -- -q --nomemory --notimingintensive ${{ matrix.pytest-args }} + " + run-mypy: + # any changes to this job should be duplicated in `run-mypy-legacy` name: mypy-${{ matrix.python-version }} runs-on: ${{ matrix.os }} strategy: @@ -143,7 +248,6 @@ jobs: os: - "ubuntu-latest" python-version: - - "3.6" - "3.7" - "3.8" - "3.9" @@ -171,6 +275,40 @@ jobs: - name: Run tests run: tox -e mypy ${{ matrix.pytest-args }} + run-mypy-legacy: + # this is identical to `run-mypy`, but pins ubuntu to 20.04 + # ubuntu-20.04 is necessary to run: py27, py36 + name: mypy-${{ matrix.python-version }} + runs-on: ${{ matrix.os }} + strategy: + # run this job using this matrix, excluding some combinations below. + matrix: + os: + - "ubuntu-20.04" + python-version: + - "3.6" + fail-fast: false + + # steps to run in each job. 
Some are github actions, others run shell commands + steps: + - name: Checkout repo + uses: actions/checkout@v3 + + - name: Set up python + uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + architecture: ${{ matrix.architecture }} + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install --upgrade tox setuptools + pip list + + - name: Run tests + run: tox -e mypy ${{ matrix.pytest-args }} + run-pep8: name: pep8-${{ matrix.python-version }} runs-on: ${{ matrix.os }} From aabddeb62158f4e07e5e93e15118aeb0f2e9cada Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 2 Dec 2022 14:06:45 -0500 Subject: [PATCH 458/632] Revert "Returned Github Actions support for py27 and py36." This reverts commit 3f1e6303f0f53cd6239b2a6227c8cd55789a175f. --- .github/workflows/create-wheels.yaml | 255 +-------------------------- .github/workflows/run-on-pr.yaml | 42 +---- .github/workflows/run-test.yaml | 150 +--------------- 3 files changed, 11 insertions(+), 436 deletions(-) diff --git a/.github/workflows/create-wheels.yaml b/.github/workflows/create-wheels.yaml index cf10ab88aa3..8353c0ec94b 100644 --- a/.github/workflows/create-wheels.yaml +++ b/.github/workflows/create-wheels.yaml @@ -123,7 +123,6 @@ jobs: twine upload --skip-existing dist/* make-wheel-linux: - # any changes should be duplicated in `make-wheel-linux-legacy` name: ${{ matrix.python-version }}-${{ matrix.architecture }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: @@ -132,6 +131,9 @@ jobs: - "ubuntu-latest" python-version: # the versions are - as specified in PEP 425. 
+ - cp27-cp27m + - cp27-cp27mu + - cp36-cp36m - cp37-cp37m - cp38-cp38 - cp39-cp39 @@ -278,164 +280,7 @@ jobs: pip install -U twine twine upload --skip-existing dist/*manylinux* - make-wheel-linux-legacy: - # this is identical to `make-wheel-linux`, but pins ubuntu to 20.04 - # ubuntu-20.04 is necessary to run: py27, py36 - name: ${{ matrix.python-version }}-${{ matrix.architecture }}-${{ matrix.os }} - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: - - "ubuntu-20.04" - python-version: - # the versions are - as specified in PEP 425. - - cp27-cp27m - - cp27-cp27mu - - cp36-cp36m - architecture: - - x64 - - include: - - python-version: "cp27-cp27m" - extra-requires: "mock" - - python-version: "cp27-cp27mu" - extra-requires: "mock" - - fail-fast: false - - steps: - - name: Checkout repo - uses: actions/checkout@v3 - - - name: Get python version - id: linux-py-version - env: - py_tag: ${{ matrix.python-version }} - # the command `echo "::set-output ...` is used to create an step output that can be used in following steps - # this is from https://github.community/t5/GitHub-Actions/Using-the-output-of-run-inside-of-if-condition/td-p/33920 - run: | - version="`echo $py_tag | sed --regexp-extended 's/cp([0-9])([0-9]+)-.*/\1.\2/g'`" - version=$([[ $version = "3.11" ]] && echo 3.11.0-rc - 3.11 || echo $version ) - echo $version - echo "::set-output name=python-version::$version" - - - name: Remove tag_build from setup.cfg - # sqlalchemy has `tag_build` set to `dev` in setup.cfg. 
We need to remove it before creating the weel - # otherwise it gets tagged with `dev0` - shell: pwsh - # This is equivalent to the sed commands: - # `sed -i '/tag_build=dev/d' setup.cfg` - # `sed -i '/tag_build = dev/d' setup.cfg` - - # `-replace` uses a regexp match - # alternative form: `(get-content setup.cfg) | foreach-object{$_ -replace "tag_build.=.dev",""} | set-content setup.cfg` - run: | - (cat setup.cfg) | %{$_ -replace "tag_build.?=.?dev",""} | set-content setup.cfg - - - name: Create wheel for manylinux1 and manylinux2010 for py3 - if: ${{ matrix.python-version != 'cp27-cp27m' && matrix.python-version != 'cp27-cp27mu' && matrix.python-version != 'cp311-cp311' }} - # this step uses the image provided by pypa here https://github.com/pypa/manylinux to generate the wheels on linux - # the action uses the image for manylinux2010 but can generate also a manylinux1 wheel - # change the tag of this image to change the image used - uses: RalfG/python-wheels-manylinux-build@v0.5.0-manylinux2010_x86_64 - # this action generates 3 wheels in dist/. linux, manylinux1 and manylinux2010 - with: - # python-versions is the output of the previous step and is in the form -. Eg cp27-cp27mu - python-versions: ${{ matrix.python-version }} - build-requirements: "setuptools>=44 wheel>=0.34" - # Create the wheel using --no-use-pep517 since locally we have pyproject - # This flag should be removed once sqlalchemy supports pep517 - # `--no-deps` is used to only generate the wheel for the current library. 
Redundant in sqlalchemy since it has no dependencies - pip-wheel-args: "-w ./dist --no-use-pep517 -v --no-deps" - - - name: Create wheel for manylinux2014 for py3 - if: ${{ matrix.python-version != 'cp27-cp27m' && matrix.python-version != 'cp27-cp27mu' }} - # this step uses the image provided by pypa here https://github.com/pypa/manylinux to generate the wheels on linux - # the action uses the image for manylinux2010 but can generate also a manylinux1 wheel - # change the tag of this image to change the image used - uses: RalfG/python-wheels-manylinux-build@v0.5.0-manylinux2014_x86_64 - # this action generates 2 wheels in dist/. linux and manylinux2014 - with: - # python-versions is the output of the previous step and is in the form -. Eg cp27-cp27mu - python-versions: ${{ matrix.python-version }} - build-requirements: "setuptools>=44 wheel>=0.34" - # Create the wheel using --no-use-pep517 since locally we have pyproject - # This flag should be removed once sqlalchemy supports pep517 - # `--no-deps` is used to only generate the wheel for the current library. Redundant in sqlalchemy since it has no dependencies - pip-wheel-args: "-w ./dist --no-use-pep517 -v --no-deps" - - - name: Create wheel for manylinux py2 - if: ${{ matrix.python-version == 'cp27-cp27m' || matrix.python-version == 'cp27-cp27mu' }} - # this step uses the image provided by pypa here https://github.com/pypa/manylinux to generate the wheels on linux - # the action uses the image for manylinux2010 but can generate also a manylinux1 wheel - # change the tag of this image to change the image used - uses: RalfG/python-wheels-manylinux-build@v0.5.0-manylinux1_x86_64 - # this action generates 2 wheels in dist/. linux and manylinux1 - with: - # python-versions is the output of the previous step and is in the form -. 
Eg cp27-cp27mu - python-versions: ${{ matrix.python-version }} - build-requirements: "setuptools>=44 wheel>=0.34" - # Create the wheel using --no-use-pep517 since locally we have pyproject - # This flag should be removed once sqlalchemy supports pep517 - # `--no-deps` is used to only generate the wheel for the current library. Redundant in sqlalchemy since it has no dependencies - pip-wheel-args: "-w ./dist --no-use-pep517 -v --no-deps" - - - name: Set up Python - uses: actions/setup-python@v4 - with: - python-version: ${{ steps.linux-py-version.outputs.python-version }} - architecture: ${{ matrix.architecture }} - - - name: Check created wheel - # check that the wheel is compatible with the current installation. - # If it is then does: - # - install the created wheel without using the pypi index - # - check the c extension - # - runs the tests - run: | - pip install 'packaging>=20.4' - if python .github/workflows/scripts/can_install.py "${{ matrix.python-version }}" - then - pip install greenlet "importlib-metadata;python_version<'3.8'" - pip install -f dist --no-index sqlalchemy - python -c 'from sqlalchemy.util import has_compiled_ext; assert has_compiled_ext()' - pip install pytest pytest-xdist ${{ matrix.extra-requires }} - pytest -n2 -q test --nomemory --notimingintensive - else - echo Not compatible. Skipping install. - fi - - - name: Upload wheels to release - # upload the generated wheels to the github release - uses: sqlalchemyorg/upload-release-assets@sa - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - files: 'dist/*manylinux*' - - - name: Set up Python for twine - # twine on py2 is very old and is no longer updated, so we change to python 3.8 before upload - uses: actions/setup-python@v4 - with: - python-version: "3.8" - - - name: Publish wheel - # the action https://github.com/marketplace/actions/pypi-publish runs only on linux and we cannot specify - # additional options - # We upload both manylinux1 and manylinux2010 wheels. 
pip will download the appropriate one according to the system. - # manylinux1 is an older format and is now not very used since many environments can use manylinux2010 - # currently (April 2020) manylinux2014 is still wip, so we do not generate it. - env: - TWINE_USERNAME: __token__ - # replace TWINE_PASSWORD with token for real pypi - # TWINE_PASSWORD: ${{ secrets.test_pypi_token }} - TWINE_PASSWORD: ${{ secrets.pypi_token }} - run: | - pip install -U twine - twine upload --skip-existing dist/*manylinux* - - make-wheel-linux-arm64: - # any changes should be duplicated in `make-wheel-linux-arm64-legacy` name: ${{ matrix.python-version }}-arm64-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: @@ -444,6 +289,7 @@ jobs: - "ubuntu-latest" python-version: # the versions are - as specified in PEP 425. + - cp36-cp36m - cp37-cp37m - cp38-cp38 - cp39-cp39 @@ -529,96 +375,3 @@ jobs: run: | pip install -U twine twine upload --skip-existing dist/*manylinux* - - make-wheel-linux-arm64-legacy: - # this is identical to `make-wheel-linux-arm64`, but pins ubuntu to 20.04 - # ubuntu-20.04 is necessary to run: py27, py36 - name: ${{ matrix.python-version }}-arm64-${{ matrix.os }} - runs-on: ${{ matrix.os }} - strategy: - matrix: - os: - - "ubuntu-20.04" - python-version: - # the versions are - as specified in PEP 425. - - cp36-cp36m - - fail-fast: false - - steps: - - name: Checkout repo - uses: actions/checkout@v3 - - - name: Remove tag_build from setup.cfg - # sqlalchemy has `tag_build` set to `dev` in setup.cfg. 
We need to remove it before creating the weel - # otherwise it gets tagged with `dev0` - shell: pwsh - # This is equivalent to the sed commands: - # `sed -i '/tag_build=dev/d' setup.cfg` - # `sed -i '/tag_build = dev/d' setup.cfg` - - # `-replace` uses a regexp match - # alternative form: `(get-content setup.cfg) | foreach-object{$_ -replace "tag_build.=.dev",""} | set-content setup.cfg` - run: | - (cat setup.cfg) | %{$_ -replace "tag_build.?=.?dev",""} | set-content setup.cfg - - - name: Set up emulation - run: | - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes - - - name: Create wheel for manylinux2014 - # this step uses the image provided by pypa here https://github.com/pypa/manylinux to generate the wheels on linux - # the action uses the image for manylinux2014 but can generate also a manylinux1 wheel - # change the tag of this image to change the image used - uses: RalfG/python-wheels-manylinux-build@v0.5.0-manylinux2014_aarch64 - # this action generates 2 wheels in dist/. linux and manylinux2014 - with: - # python-versions is the output of the previous step and is in the form -. Eg cp37-cp37mu - python-versions: ${{ matrix.python-version }} - build-requirements: "setuptools>=44 wheel>=0.34" - # Create the wheel using --no-use-pep517 since locally we have pyproject - # This flag should be removed once sqlalchemy supports pep517 - # `--no-deps` is used to only generate the wheel for the current library. Redundant in sqlalchemy since it has no dependencies - pip-wheel-args: "-w ./dist --no-use-pep517 -v --no-deps" - - - name: Check created wheel - # check that the wheel is compatible with the current installation. 
- # - runs the tests - uses: docker://quay.io/pypa/manylinux2014_aarch64 - with: - args: | - bash -c " - export PATH=/opt/python/${{ matrix.python-version }}/bin:$PATH && - python --version && - pip install greenlet \"importlib-metadata;python_version<'3.8'\" && - pip install -f dist --no-index sqlalchemy && - python -c 'from sqlalchemy.util import has_compiled_ext; assert has_compiled_ext()' && - pip install pytest pytest-xdist ${{ matrix.extra-requires }} && - pytest -n2 -q test --nomemory --notimingintensive" - - - name: Upload wheels to release - # upload the generated wheels to the github release - uses: sqlalchemyorg/upload-release-assets@sa - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - files: 'dist/*manylinux*' - - - name: Set up Python for twine - # Setup python after creating the wheel, otherwise LD_LIBRARY_PATH gets set and it will break wheel generation - # twine on py2 is very old and is no longer updated, so we change to python 3.8 before upload - uses: actions/setup-python@v4 - with: - python-version: "3.8" - - - name: Publish wheel - # the action https://github.com/marketplace/actions/pypi-publish runs only on linux and we cannot specify - # additional options - # We upload manylinux2014 arm64 wheels. pip will download the appropriate one according to the system. 
- env: - TWINE_USERNAME: __token__ - # replace TWINE_PASSWORD with token for real pypi - # TWINE_PASSWORD: ${{ secrets.test_pypi_token }} - TWINE_PASSWORD: ${{ secrets.pypi_token }} - run: | - pip install -U twine - twine upload --skip-existing dist/*manylinux* \ No newline at end of file diff --git a/.github/workflows/run-on-pr.yaml b/.github/workflows/run-on-pr.yaml index 2f11b7dad86..087f1bc3320 100644 --- a/.github/workflows/run-on-pr.yaml +++ b/.github/workflows/run-on-pr.yaml @@ -17,7 +17,6 @@ permissions: jobs: run-test-amd64: - # any changes to this job should be duplicated in `run-mypy-legacy` name: ${{ matrix.python-version }}-${{ matrix.build-type }}-${{ matrix.architecture }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: @@ -25,48 +24,9 @@ jobs: matrix: os: - "ubuntu-latest" - python-version: - - "3.10" - build-type: - - "cext" - - "nocext" - architecture: - - x64 - # abort all jobs as soon as one fails - fail-fast: true - - # steps to run in each job. Some are github actions, others run shell commands - steps: - - name: Checkout repo - uses: actions/checkout@v3 - - - name: Set up python - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - architecture: ${{ matrix.architecture }} - - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install --upgrade tox setuptools - pip list - - - name: Run tests - run: tox -e github-${{ matrix.build-type }} -- -q --nomemory --notimingintensive ${{ matrix.pytest-args }} - - run-test-amd64-legacy: - # this is identical to `run-test-amd64`, but pins ubuntu to 20.04 - # ubuntu-20.04 is necessary to run: py27, py36 - name: ${{ matrix.python-version }}-${{ matrix.build-type }}-${{ matrix.architecture }}-${{ matrix.os }} - runs-on: ${{ matrix.os }} - strategy: - # run this job using this matrix, excluding some combinations below. 
- matrix: - os: - - "ubuntu-20.04" python-version: - "2.7" + - "3.10" build-type: - "cext" - "nocext" diff --git a/.github/workflows/run-test.yaml b/.github/workflows/run-test.yaml index eb7aafae9f6..1c97f64bc1d 100644 --- a/.github/workflows/run-test.yaml +++ b/.github/workflows/run-test.yaml @@ -21,7 +21,6 @@ permissions: jobs: run-test: - # any changes to this job should be duplicated in `run-test-legacy` name: ${{ matrix.python-version }}-${{ matrix.build-type }}-${{ matrix.architecture }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: @@ -32,6 +31,8 @@ jobs: - "windows-latest" - "macos-latest" python-version: + - "2.7" + - "3.6" - "3.7" - "3.8" - "3.9" @@ -52,76 +53,7 @@ jobs: # - python-version: "pypy-3.9" # pytest-args: "-k 'not test_autocommit_on and not test_turn_autocommit_off_via_default_iso_level and not test_autocommit_isolation_level'" # add aiosqlite on linux - - os: "ubuntu-20.04" - pytest-args: "--dbdriver pysqlite --dbdriver aiosqlite" - - exclude: - # c-extensions fail to build on windows for python 2.7 - - os: "windows-latest" - python-version: "2.7" - build-type: "cext" - # linux and osx do not have x86 python - - os: "ubuntu-20.04" - architecture: x86 - - os: "macos-latest" - architecture: x86 - # pypy does not have cext - # - python-version: "pypy-3.9" - # build-type: "cext" - # - os: "windows-latest" - # python-version: "pypy-3.9" - # architecture: x86 - - fail-fast: false - - # steps to run in each job. 
Some are github actions, others run shell commands - steps: - - name: Checkout repo - uses: actions/checkout@v3 - - - name: Set up python - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - architecture: ${{ matrix.architecture }} - - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install --upgrade tox setuptools - pip list - - - name: Run tests - run: tox -e github-${{ matrix.build-type }} -- -q --nomemory --notimingintensive ${{ matrix.pytest-args }} - - run-test-legacy: - # this is identical to `run-test`, but pins ubuntu to 20.04 - # ubuntu-20.04 is necessary to run: py27, py36 - name: ${{ matrix.python-version }}-${{ matrix.build-type }}-${{ matrix.architecture }}-${{ matrix.os }} - runs-on: ${{ matrix.os }} - strategy: - # run this job using this matrix, excluding some combinations below. - matrix: - os: - - "ubuntu-20.04" - - "windows-latest" - - "macos-latest" - python-version: - - "2.7" - - "3.6" - build-type: - - "cext" - - "nocext" - architecture: - - x64 - - x86 - - include: - # autocommit tests fail on the ci for some reason - # - python-version: "pypy-3.9" - # pytest-args: "-k 'not test_autocommit_on and not test_turn_autocommit_off_via_default_iso_level and not test_autocommit_isolation_level'" - # add aiosqlite on linux - - os: "ubuntu-20.04" + - os: "ubuntu-latest" pytest-args: "--dbdriver pysqlite --dbdriver aiosqlite" exclude: @@ -130,7 +62,7 @@ jobs: python-version: "2.7" build-type: "cext" # linux and osx do not have x86 python - - os: "ubuntu-20.04" + - os: "ubuntu-latest" architecture: x86 - os: "macos-latest" architecture: x86 @@ -164,12 +96,12 @@ jobs: run: tox -e github-${{ matrix.build-type }} -- -q --nomemory --notimingintensive ${{ matrix.pytest-args }} run-test-arm64: - # any changes to this job should be duplicated in `run-test-arm64` name: ${{ matrix.python-version }}-${{ matrix.build-type }}-arm64-ubuntu-latest runs-on: ubuntu-latest strategy: matrix: 
python-version: + - cp36-cp36m - cp37-cp37m - cp38-cp38 - cp39-cp39 @@ -202,44 +134,7 @@ jobs: tox -e github-${{ matrix.build-type }} -- -q --nomemory --notimingintensive ${{ matrix.pytest-args }} " - run-test-arm64-legacy: - # this is identical to `run-test-arm64-legacy`, but pins ubuntu to 20.04 - # ubuntu-20.04 is necessary to run: py27, py36 - name: ${{ matrix.python-version }}-${{ matrix.build-type }}-arm64-ubuntu-20.04 - runs-on: ubuntu-20.04 - strategy: - matrix: - python-version: - - cp36-cp36m - build-type: - - "cext" - - "nocext" - - fail-fast: false - - steps: - - name: Checkout repo - uses: actions/checkout@v3 - - - name: Set up emulation - run: | - docker run --rm --privileged multiarch/qemu-user-static --reset -p yes - - - name: Run tests - uses: docker://quay.io/pypa/manylinux2014_aarch64 - with: - args: | - bash -c " - export PATH=/opt/python/${{ matrix.python-version }}/bin:$PATH && - python --version && - python -m pip install --upgrade pip && - pip install --upgrade tox setuptools && - pip list && - tox -e github-${{ matrix.build-type }} -- -q --nomemory --notimingintensive ${{ matrix.pytest-args }} - " - run-mypy: - # any changes to this job should be duplicated in `run-mypy-legacy` name: mypy-${{ matrix.python-version }} runs-on: ${{ matrix.os }} strategy: @@ -248,6 +143,7 @@ jobs: os: - "ubuntu-latest" python-version: + - "3.6" - "3.7" - "3.8" - "3.9" @@ -275,40 +171,6 @@ jobs: - name: Run tests run: tox -e mypy ${{ matrix.pytest-args }} - run-mypy-legacy: - # this is identical to `run-mypy`, but pins ubuntu to 20.04 - # ubuntu-20.04 is necessary to run: py27, py36 - name: mypy-${{ matrix.python-version }} - runs-on: ${{ matrix.os }} - strategy: - # run this job using this matrix, excluding some combinations below. - matrix: - os: - - "ubuntu-20.04" - python-version: - - "3.6" - fail-fast: false - - # steps to run in each job. 
Some are github actions, others run shell commands - steps: - - name: Checkout repo - uses: actions/checkout@v3 - - - name: Set up python - uses: actions/setup-python@v4 - with: - python-version: ${{ matrix.python-version }} - architecture: ${{ matrix.architecture }} - - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install --upgrade tox setuptools - pip list - - - name: Run tests - run: tox -e mypy ${{ matrix.pytest-args }} - run-pep8: name: pep8-${{ matrix.python-version }} runs-on: ${{ matrix.os }} From 67c2a6b2aac326dcc46468f2103d9aaa000f6ce0 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 2 Dec 2022 17:00:10 -0500 Subject: [PATCH 459/632] add spaces, leading underscore to oracle checks Expand the test suite from #8708 which unfortunately did not exercise the bound parameter codepaths completely. Continued fixes for Oracle fix :ticket:`8708` released in 1.4.43 where bound parameter names that start with underscores, which are disallowed by Oracle, were still not being properly escaped in all circumstances. Fixes: #8708 Change-Id: Ic389c09bd7c53b773e5de35f1a18ef20769b92a7 (cherry picked from commit 2886412438de072b4925818ac746e56a2067bee3) --- doc/build/changelog/unreleased_14/8708.rst | 9 +++ lib/sqlalchemy/dialects/oracle/cx_oracle.py | 8 +-- lib/sqlalchemy/testing/suite/test_dialect.py | 66 +++++++++++++++++++- 3 files changed, 76 insertions(+), 7 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8708.rst diff --git a/doc/build/changelog/unreleased_14/8708.rst b/doc/build/changelog/unreleased_14/8708.rst new file mode 100644 index 00000000000..61dcbf658ec --- /dev/null +++ b/doc/build/changelog/unreleased_14/8708.rst @@ -0,0 +1,9 @@ +.. 
change:: + :tags: bug, oracle + :tickets: 8708 + :versions: 2.0.0b4 + + Continued fixes for Oracle fix :ticket:`8708` released in 1.4.43 where + bound parameter names that start with underscores, which are disallowed by + Oracle, were still not being properly escaped in all circumstances. + diff --git a/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/lib/sqlalchemy/dialects/oracle/cx_oracle.py index fe18d1310b0..acdf4ded28a 100644 --- a/lib/sqlalchemy/dialects/oracle/cx_oracle.py +++ b/lib/sqlalchemy/dialects/oracle/cx_oracle.py @@ -472,13 +472,13 @@ def _remove_clob(inputsizes, cursor, statement, parameters, context): from ...util import compat -_ORACLE_BIND_TRANSLATE_RE = re.compile(r"[%\(\):\[\]\.\/\?]") +_ORACLE_BIND_TRANSLATE_RE = re.compile(r"[%\(\):\[\]\.\/\? ]") # Oracle bind names can't start with digits or underscores. # currently we rely upon Oracle-specific quoting of bind names in most cases. # however for expanding params, the escape chars are used. # see #8708 -_ORACLE_BIND_TRANSLATE_CHARS = dict(zip("%():[]./?", "PAZCCCCCCC")) +_ORACLE_BIND_TRANSLATE_CHARS = dict(zip("%():[]./? 
", "PAZCCCCCCCC")) class _OracleInteger(sqltypes.Integer): @@ -740,11 +740,11 @@ def bindparam_string(self, name, **kw): lambda m: _ORACLE_BIND_TRANSLATE_CHARS[m.group(0)], name, ) - if new_name[0].isdigit(): + if new_name[0].isdigit() or new_name[0] == "_": new_name = "D" + new_name kw["escaped_from"] = name name = new_name - elif name[0].isdigit(): + elif name[0].isdigit() or name[0] == "_": new_name = "D" + name kw["escaped_from"] = name name = new_name diff --git a/lib/sqlalchemy/testing/suite/test_dialect.py b/lib/sqlalchemy/testing/suite/test_dialect.py index 54acc7ec4b9..99947bbe4f5 100644 --- a/lib/sqlalchemy/testing/suite/test_dialect.py +++ b/lib/sqlalchemy/testing/suite/test_dialect.py @@ -317,7 +317,7 @@ class FutureWeCanSetDefaultSchemaWEventsTest( class DifficultParametersTest(fixtures.TestBase): __backend__ = True - @testing.combinations( + tough_parameters = testing.combinations( ("boring",), ("per cent",), ("per % cent",), @@ -328,14 +328,26 @@ class DifficultParametersTest(fixtures.TestBase): ("_starts_with_underscore",), ("dot.s",), ("more :: %colons%",), + ("_name",), + ("___name",), + ("[BracketsAndCase]",), + ("42numbers",), + ("percent%signs",), + ("has spaces",), ("/slashes/",), ("more/slashes",), ("q?marks",), ("1param",), ("1col:on",), - argnames="name", + argnames="paramname", ) - def test_round_trip(self, name, connection, metadata): + + @tough_parameters + def test_round_trip_same_named_column( + self, paramname, connection, metadata + ): + name = paramname + t = Table( "t", metadata, @@ -368,3 +380,51 @@ def test_round_trip(self, name, connection, metadata): ) row = connection.execute(stmt).first() + + @testing.fixture + def multirow_fixture(self, metadata, connection): + mytable = Table( + "mytable", + metadata, + Column("myid", Integer), + Column("name", String(50)), + Column("desc", String(50)), + ) + + mytable.create(connection) + + connection.execute( + mytable.insert(), + [ + {"myid": 1, "name": "a", "desc": "a_desc"}, + {"myid": 
2, "name": "b", "desc": "b_desc"}, + {"myid": 3, "name": "c", "desc": "c_desc"}, + {"myid": 4, "name": "d", "desc": "d_desc"}, + ], + ) + yield mytable + + @tough_parameters + def test_standalone_bindparam_escape( + self, paramname, connection, multirow_fixture + ): + tbl1 = multirow_fixture + stmt = select(tbl1.c.myid).where( + tbl1.c.name == bindparam(paramname, value="x") + ) + res = connection.scalar(stmt, {paramname: "c"}) + eq_(res, 3) + + @tough_parameters + def test_standalone_bindparam_escape_expanding( + self, paramname, connection, multirow_fixture + ): + tbl1 = multirow_fixture + stmt = ( + select(tbl1.c.myid) + .where(tbl1.c.name.in_(bindparam(paramname, value=["a", "b"]))) + .order_by(tbl1.c.myid) + ) + + res = connection.scalars(stmt, {paramname: ["d", "a"]}).all() + eq_(res, [1, 4]) From 50504f0c7015c7d66e43d19c165377c3567133a0 Mon Sep 17 00:00:00 2001 From: Jonathan Vanasco Date: Sat, 3 Dec 2022 11:20:51 -0500 Subject: [PATCH 460/632] Returned "GitHub Actions" support for py27 and py36 environments. (#8924) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GitHub recently upgraded the `ubuntu-latest` operating system label to point to `ubuntu-22.04` instead of `ubuntu-22.04`. The `ubuntu-22.04` build does not support Python 2.7 or 3.6. In order to return support for CI Testing and release builds, the affected jobs now utilize `include` and `exclude` commands in run matrixes to enable and disable jobs under certain environments. Jobs that requres Py27 and Py36 now run under an explicitly identified `ubuntu-20.04` operating system. The majority of jobs require Py37 or higher, which are all currently supported in the new `ubuntu-latest` operating system ( which points to `ubuntu-22.04`). 
Although `ubuntu-20.04` should continue to receive support for several more years, the `ubuntu-22.04` platform is likely to benefit from patch releases on a faster schedule – so it is preferable to continue running all compatible tests on `ubuntu-latest` rather than pinning everything to the earlier os version. Several jobs were also standardized to use the job "name" as a prefix, a convention that most jobs in the workflows already adapted. This practice greatly simplifies correlating failed tests to specific jobs. See:: * https://github.blog/changelog/2022-11-09-github-actions-ubuntu-latest-workflows-will-use-ubuntu-22-04/ * https://github.blog/changelog/2022-12-01-github-actions-larger-runners-using-ubuntu-latest-label-will-now-use-ubuntu-22-04/ Change-Id: I0014029c7c6ee74824c8d971bd21ee9199bc8381 --- .github/workflows/create-wheels.yaml | 43 ++++++++++++++++-- .github/workflows/run-on-pr.yaml | 11 ++++- .github/workflows/run-test.yaml | 66 +++++++++++++++++++++++++--- 3 files changed, 110 insertions(+), 10 deletions(-) diff --git a/.github/workflows/create-wheels.yaml b/.github/workflows/create-wheels.yaml index 8353c0ec94b..d88da9038c2 100644 --- a/.github/workflows/create-wheels.yaml +++ b/.github/workflows/create-wheels.yaml @@ -15,7 +15,7 @@ jobs: # two jobs are defined make-wheel-win-osx and make-wheel-linux. 
# they do the the same steps, but linux wheels need to be build to target manylinux make-wheel-win-osx: - name: ${{ matrix.python-version }}-${{ matrix.architecture }}-${{ matrix.os }} + name: wheel-win-osx-${{ matrix.python-version }}-${{ matrix.architecture }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: matrix: @@ -123,12 +123,13 @@ jobs: twine upload --skip-existing dist/* make-wheel-linux: - name: ${{ matrix.python-version }}-${{ matrix.architecture }}-${{ matrix.os }} + name: wheel-linux-${{ matrix.python-version }}-${{ matrix.architecture }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: matrix: os: - "ubuntu-latest" + - "ubuntu-20.04" python-version: # the versions are - as specified in PEP 425. - cp27-cp27m @@ -148,6 +149,26 @@ jobs: - python-version: "cp27-cp27mu" extra-requires: "mock" + exclude: + # ubuntu-latest does not have: py27, py36 + - os: "ubuntu-latest" + python-version: cp27-cp27m + - os: "ubuntu-latest" + python-version: cp27-cp27mu + - os: "ubuntu-latest" + python-version: cp36-cp36m + # ubuntu-20.04 does not need to test what ubuntu-latest supports + - os: "ubuntu-20.04" + python-version: cp37-cp37m + - os: "ubuntu-20.04" + python-version: cp38-cp38 + - os: "ubuntu-20.04" + python-version: cp39-cp39 + - os: "ubuntu-20.04" + python-version: cp310-cp310 + - os: "ubuntu-20.04" + python-version: cp311-cp311 + fail-fast: false steps: @@ -281,12 +302,13 @@ jobs: twine upload --skip-existing dist/*manylinux* make-wheel-linux-arm64: - name: ${{ matrix.python-version }}-arm64-${{ matrix.os }} + name: wheel-linux-arm64-${{ matrix.python-version }}-arm64-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: matrix: os: - "ubuntu-latest" + - "ubuntu-20.04" python-version: # the versions are - as specified in PEP 425. 
- cp36-cp36m @@ -295,6 +317,21 @@ jobs: - cp39-cp39 - cp310-cp310 - cp311-cp311 + exclude: + # ubuntu-latest does not have: py27, py36 + - os: "ubuntu-latest" + python-version: cp36-cp36m + # ubuntu-20.04 does not need to test what ubuntu-latest supports + - os: "ubuntu-20.04" + python-version: cp37-cp37m + - os: "ubuntu-20.04" + python-version: cp38-cp38 + - os: "ubuntu-20.04" + python-version: cp39-cp39 + - os: "ubuntu-20.04" + python-version: cp310-cp310 + - os: "ubuntu-20.04" + python-version: cp311-cp311 fail-fast: false diff --git a/.github/workflows/run-on-pr.yaml b/.github/workflows/run-on-pr.yaml index 087f1bc3320..214c79b2c32 100644 --- a/.github/workflows/run-on-pr.yaml +++ b/.github/workflows/run-on-pr.yaml @@ -17,13 +17,14 @@ permissions: jobs: run-test-amd64: - name: ${{ matrix.python-version }}-${{ matrix.build-type }}-${{ matrix.architecture }}-${{ matrix.os }} + name: test-amd64-${{ matrix.python-version }}-${{ matrix.build-type }}-${{ matrix.architecture }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: # run this job using this matrix, excluding some combinations below. 
matrix: os: - "ubuntu-latest" + - "ubuntu-20.04" python-version: - "2.7" - "3.10" @@ -32,6 +33,14 @@ jobs: - "nocext" architecture: - x64 + exclude: + # ubuntu-latest does not have: py27, py36 + - os: "ubuntu-latest" + python-version: "2.7" + # ubuntu-20.04 does not need to test what ubuntu-latest supports + - os: "ubuntu-20.04" + python-version: "3.10" + # abort all jobs as soon as one fails fail-fast: true diff --git a/.github/workflows/run-test.yaml b/.github/workflows/run-test.yaml index 1c97f64bc1d..2a08090ef97 100644 --- a/.github/workflows/run-test.yaml +++ b/.github/workflows/run-test.yaml @@ -21,13 +21,14 @@ permissions: jobs: run-test: - name: ${{ matrix.python-version }}-${{ matrix.build-type }}-${{ matrix.architecture }}-${{ matrix.os }} + name: test-${{ matrix.python-version }}-${{ matrix.build-type }}-${{ matrix.architecture }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: # run this job using this matrix, excluding some combinations below. matrix: os: - "ubuntu-latest" + - "ubuntu-20.04" - "windows-latest" - "macos-latest" python-version: @@ -55,6 +56,8 @@ jobs: # add aiosqlite on linux - os: "ubuntu-latest" pytest-args: "--dbdriver pysqlite --dbdriver aiosqlite" + - os: "ubuntu-20.04" + pytest-args: "--dbdriver pysqlite --dbdriver aiosqlite" exclude: # c-extensions fail to build on windows for python 2.7 @@ -64,8 +67,26 @@ jobs: # linux and osx do not have x86 python - os: "ubuntu-latest" architecture: x86 + - os: "ubuntu-20.04" + architecture: x86 - os: "macos-latest" architecture: x86 + # ubuntu-latest does not have: py27, py36 + - os: "ubuntu-latest" + python-version: "2.7" + - os: "ubuntu-latest" + python-version: "3.6" + # ubuntu-20.04 does not need to test what ubuntu-latest supports + - os: "ubuntu-20.04" + python-version: "3.7" + - os: "ubuntu-20.04" + python-version: "3.8" + - os: "ubuntu-20.04" + python-version: "3.9" + - os: "ubuntu-20.04" + python-version: "3.10" + - os: "ubuntu-20.04" + python-version: "3.11.0-rc - 3.11" # pypy 
does not have cext # - python-version: "pypy-3.9" # build-type: "cext" @@ -96,10 +117,13 @@ jobs: run: tox -e github-${{ matrix.build-type }} -- -q --nomemory --notimingintensive ${{ matrix.pytest-args }} run-test-arm64: - name: ${{ matrix.python-version }}-${{ matrix.build-type }}-arm64-ubuntu-latest - runs-on: ubuntu-latest + name: arm64-${{ matrix.python-version }}-${{ matrix.build-type }}-${{ matrix.os }} + runs-on: ${{ matrix.os }} strategy: matrix: + os: + - "ubuntu-latest" + - "ubuntu-20.04" python-version: - cp36-cp36m - cp37-cp37m @@ -110,6 +134,21 @@ jobs: build-type: - "cext" - "nocext" + exclude: + # ubuntu-latest does not have: py27, py36 + - os: "ubuntu-latest" + python-version: cp36-cp36m + # ubuntu-20.04 does not need to test what ubuntu-latest supports + - os: "ubuntu-20.04" + python-version: cp37-cp37m + - os: "ubuntu-20.04" + python-version: cp38-cp38m + - os: "ubuntu-20.04" + python-version: cp39-cp39m + - os: "ubuntu-20.04" + python-version: cp310-cp310m + - os: "ubuntu-20.04" + python-version: cp311-cp311m fail-fast: false @@ -135,13 +174,14 @@ jobs: " run-mypy: - name: mypy-${{ matrix.python-version }} + name: mypy-${{ matrix.python-version }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: # run this job using this matrix, excluding some combinations below. matrix: os: - "ubuntu-latest" + - "ubuntu-20.04" python-version: - "3.6" - "3.7" @@ -149,6 +189,21 @@ jobs: - "3.9" - "3.10" - "3.11.0-rc - 3.11" + exclude: + # ubuntu-latest does not have: py27, py36 + - os: "ubuntu-latest" + python-version: "3.6" + # ubuntu-20.04 does not need to test what ubuntu-latest supports + - os: "ubuntu-20.04" + python-version: "3.7" + - os: "ubuntu-20.04" + python-version: "3.8" + - os: "ubuntu-20.04" + python-version: "3.9" + - os: "ubuntu-20.04" + python-version: "3.10" + - os: "ubuntu-20.04" + python-version: "3.11.0-rc - 3.11" fail-fast: false # steps to run in each job. 
Some are github actions, others run shell commands @@ -172,10 +227,9 @@ jobs: run: tox -e mypy ${{ matrix.pytest-args }} run-pep8: - name: pep8-${{ matrix.python-version }} + name: pep8-${{ matrix.python-version }}-${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: - # run this job using this matrix, excluding some combinations below. matrix: os: - "ubuntu-latest" From b482b2af6fd917d0ead7446cce8abd4cf0747d26 Mon Sep 17 00:00:00 2001 From: wiseaidev Date: Mon, 5 Dec 2022 08:44:35 -0500 Subject: [PATCH 461/632] Fixed an invalid syntax in an except statement ### Description As the title suggests, I have fixed an invalid syntax in the docs for an `except` statement while reading the unusual. ### Checklist This pull request is: - [x] A documentation / typographical error fix - Good to go, no issue or tests are needed - [ ] A short code fix - please include the issue number, and create an issue if none exists, which must include a complete example of the issue. one line code fixes without an issue and demonstration will not be accepted. - Please include: `Fixes: #` in the commit message - please include tests. one line code fixes without tests will not be accepted. - [ ] A new feature implementation - please include the issue number, and create an issue if none exists, which must include a complete example of how the feature would look. - Please include: `Fixes: #` in the commit message - please include tests. 
**Have a nice day!** Closes: #8715 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/8715 Pull-request-sha: e8be2bc4a5401ab2a5a0fc1d2e50d41fa437ae80 Change-Id: If8512bf1853f7cdb1ae655f0945cd922fff6fbce (cherry picked from commit 38636bfd22f236343daf11aef31145ae54867028) --- doc/build/core/pooling.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/core/pooling.rst b/doc/build/core/pooling.rst index a93b9477f8d..35c312302ce 100644 --- a/doc/build/core/pooling.rst +++ b/doc/build/core/pooling.rst @@ -412,7 +412,7 @@ illustrated by the code example below:: # suppose the database has been restarted. c.execute(text("SELECT * FROM table")) c.close() - except exc.DBAPIError, e: + except exc.DBAPIError as e: # an exception is raised, Connection is invalidated. if e.connection_invalidated: print("Connection was invalidated!") From 6999ab8f79b4f014dc1ba8113fee8b9c78920a22 Mon Sep 17 00:00:00 2001 From: j00356287 Date: Mon, 5 Dec 2022 08:44:09 -0500 Subject: [PATCH 462/632] doc change - Add new external dialect for openGauss ### Description Added new external dialect for [openGauss](https://www.opengauss.org/en/). ### Checklist This pull request is: - [x] A documentation / typographical error fix - Good to go, no issue or tests are needed - [ ] A short code fix - please include the issue number, and create an issue if none exists, which must include a complete example of the issue. one line code fixes without an issue and demonstration will not be accepted. - Please include: `Fixes: #` in the commit message - please include tests. one line code fixes without tests will not be accepted. - [ ] A new feature implementation - please include the issue number, and create an issue if none exists, which must include a complete example of how the feature would look. - Please include: `Fixes: #` in the commit message - please include tests. 
**Have a nice day!** Closes: #8803 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/8803 Pull-request-sha: ca5a7f1310b2ce93ce33618e0609abe23b41a3da Change-Id: Ia8ca38f98f346fa3cc910ceb7af47f8c903eb587 (cherry picked from commit db6145287244ca9801f7b7e1acc90cd25a572cde) --- doc/build/dialects/index.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/build/dialects/index.rst b/doc/build/dialects/index.rst index 48c677da699..14ef2ed8f66 100644 --- a/doc/build/dialects/index.rst +++ b/doc/build/dialects/index.rst @@ -121,6 +121,8 @@ Currently maintained external dialect projects for SQLAlchemy include: +------------------------------------------------+---------------------------------------+ | MonetDB [1]_ | sqlalchemy-monetdb_ | +------------------------------------------------+---------------------------------------+ +| OpenGauss | openGauss-sqlalchemy_ | ++------------------------------------------------+---------------------------------------+ | SAP ASE (fork of former Sybase dialect) | sqlalchemy-sybase_ | +------------------------------------------------+---------------------------------------+ | SAP Hana [1]_ | sqlalchemy-hana_ | @@ -134,6 +136,7 @@ Currently maintained external dialect projects for SQLAlchemy include: .. [1] Supports version 1.3.x only at the moment. +.. _openGauss-sqlalchemy: https://gitee.com/opengauss/openGauss-sqlalchemy .. _sqlalchemy-ingres: https://github.com/clach04/ingres_sa_dialect .. _nzalchemy: https://pypi.org/project/nzalchemy/ .. _ibm-db-sa: https://pypi.org/project/ibm-db-sa/ From da505159463abbc0bb178a3a71e6348890f0e9aa Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 4 Dec 2022 23:25:14 -0500 Subject: [PATCH 463/632] adjustments for unreliable gc sporadic (and at the moment persistent) test failures related to aiosqlite seem to have in common that Python gc stops working fully when we run a lot of tests with aiosqlite. 
The failures are not limited to aiosqlite as they are more involving places where we assume or expect gc.collect() to get rid of things, and it doesn't. Identify (based on reproducible case on the d3 CI runner) the spots where this happens and add fixes. test/orm/test_transaction.py test_gced_delete_on_rollback has always been a very sensitive test with a lot of issues, so here we move it to the test_memusage suite and limit it only to when the memusage suite is running. Change-Id: I683412d0effe8732c45980b40722e5bb63431177 (cherry picked from commit ca46caede4b8d846f3cd48e642922ae821d0be2b) --- test/aaa_profiling/test_memusage.py | 55 +++++++++++++++++++++++++++++ test/orm/test_transaction.py | 37 ------------------- test/requirements.py | 1 + 3 files changed, 56 insertions(+), 37 deletions(-) diff --git a/test/aaa_profiling/test_memusage.py b/test/aaa_profiling/test_memusage.py index bd727a842ac..4b2699e1bea 100644 --- a/test/aaa_profiling/test_memusage.py +++ b/test/aaa_profiling/test_memusage.py @@ -17,6 +17,7 @@ from sqlalchemy import util from sqlalchemy.engine import result from sqlalchemy.orm import aliased +from sqlalchemy.orm import attributes from sqlalchemy.orm import clear_mappers from sqlalchemy.orm import configure_mappers from sqlalchemy.orm import declarative_base @@ -1755,3 +1756,57 @@ def go(): s.close() go() + + +class MiscMemoryIntensiveTests(fixtures.TestBase): + __tags__ = ("memory_intensive",) + + @testing.fixture + def user_fixture(self, decl_base): + class User(decl_base): + __tablename__ = "user" + + id = Column(Integer, primary_key=True) + name = Column(String(50)) + + decl_base.metadata.create_all(testing.db) + yield User + + @testing.requires.predictable_gc + def test_gced_delete_on_rollback(self, user_fixture): + User = user_fixture + + s = fixture_session() + u1 = User(name="ed") + s.add(u1) + s.commit() + + s.delete(u1) + u1_state = attributes.instance_state(u1) + assert u1_state in s.identity_map.all_states() + assert u1_state in 
s._deleted + s.flush() + assert u1_state not in s.identity_map.all_states() + assert u1_state not in s._deleted + del u1 + gc_collect() + gc_collect() + gc_collect() + assert u1_state.obj() is None + + s.rollback() + # new in 1.1, not in identity map if the object was + # gc'ed and we restore snapshot; we've changed update_impl + # to just skip this object + assert u1_state not in s.identity_map.all_states() + + # in any version, the state is replaced by the query + # because the identity map would switch it + u1 = s.query(User).filter_by(name="ed").one() + assert u1_state not in s.identity_map.all_states() + + eq_(s.scalar(select(func.count("*")).select_from(User.__table__)), 1) + s.delete(u1) + s.flush() + eq_(s.scalar(select(func.count("*")).select_from(User.__table__)), 0) + s.commit() diff --git a/test/orm/test_transaction.py b/test/orm/test_transaction.py index e077220e19b..9d81e95b22d 100644 --- a/test/orm/test_transaction.py +++ b/test/orm/test_transaction.py @@ -1345,43 +1345,6 @@ def test_update_deleted_on_rollback(self): assert u1 in s assert u1 not in s.deleted - @testing.requires.predictable_gc - def test_gced_delete_on_rollback(self): - User, users = self.classes.User, self.tables.users - - s = fixture_session() - u1 = User(name="ed") - s.add(u1) - s.commit() - - s.delete(u1) - u1_state = attributes.instance_state(u1) - assert u1_state in s.identity_map.all_states() - assert u1_state in s._deleted - s.flush() - assert u1_state not in s.identity_map.all_states() - assert u1_state not in s._deleted - del u1 - gc_collect() - assert u1_state.obj() is None - - s.rollback() - # new in 1.1, not in identity map if the object was - # gc'ed and we restore snapshot; we've changed update_impl - # to just skip this object - assert u1_state not in s.identity_map.all_states() - - # in any version, the state is replaced by the query - # because the identity map would switch it - u1 = s.query(User).filter_by(name="ed").one() - assert u1_state not in 
s.identity_map.all_states() - - eq_(s.scalar(select(func.count("*")).select_from(users)), 1) - s.delete(u1) - s.flush() - eq_(s.scalar(select(func.count("*")).select_from(users)), 0) - s.commit() - def test_trans_deleted_cleared_on_rollback(self): User = self.classes.User s = fixture_session() diff --git a/test/requirements.py b/test/requirements.py index b5e9711115c..55c3383a42f 100644 --- a/test/requirements.py +++ b/test/requirements.py @@ -409,6 +409,7 @@ def memory_process_intensive(self): [ no_support("oracle", "Oracle XE usually can't handle these"), no_support("mssql+pyodbc", "MS ODBC drivers struggle"), + no_support("+aiosqlite", "very unreliable driver"), self._running_on_windows(), ] ) From b59d828c9f5ce319ce2af694b868634ffb1d0237 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 5 Dec 2022 17:16:32 -0500 Subject: [PATCH 464/632] changelog updates prep for 2.0.0b4 Change-Id: I2bf7249f6ed0c120b9d04d81eaecdf8593729c83 (cherry picked from commit 42876aabb5f893e1922676eb809e6b37c0519ed8) --- doc/build/changelog/unreleased_14/8881.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/build/changelog/unreleased_14/8881.rst b/doc/build/changelog/unreleased_14/8881.rst index f3fe5e66e74..9cd62f491ab 100644 --- a/doc/build/changelog/unreleased_14/8881.rst +++ b/doc/build/changelog/unreleased_14/8881.rst @@ -3,12 +3,12 @@ :tickets: 8881 Fixed issues in :func:`_orm.with_expression` where expressions that were - composed of columns within a subquery being SELECTed from, or when using - ``.from_statement()``, would not render correct SQL **if** the expression + composed of columns that were referenced from the enclosing SELECT would + not render correct SQL in some contexts, in the case where the expression had a label name that matched the attribute which used :func:`_orm.query_expression`, even when :func:`_orm.query_expression` had no default expression. 
For the moment, if the :func:`_orm.query_expression` - **does** have a default expression, that label name is still used for that - default, and an additional label with the same name will be ignored. - Overall, this case is pretty thorny so further adjustments might be - warranted. + does have a default expression, that label name is still used for that + default, and an additional label with the same name will continue to be + ignored. Overall, this case is pretty thorny so further adjustments might + be warranted. From 1b261e8a8b58d6c13fa1e4d544bffd06fbcf0c55 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 5 Dec 2022 21:04:21 -0500 Subject: [PATCH 465/632] repair memusage tox under python 2 memusage was inadvertently blocked under py2k. also add path to test files as old pytest 4 versions are extremely slow collecting tests on py2. Change-Id: Ida74b6ef7f3c29d03201e27876cce56c517f3b05 --- tox.ini | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 87105998e56..923c2cf3727 100644 --- a/tox.ini +++ b/tox.ini @@ -82,7 +82,7 @@ setenv= cext: REQUIRE_SQLALCHEMY_CEXT=1 cov: COVERAGE={[testenv]cov_args} backendonly: BACKENDONLY=--backend-only - memusage: MEMUSAGE='-k test_memusage' + memusage: MEMUSAGE=-k test_memusage test/aaa_profiling/ oracle: WORKERS={env:TOX_WORKERS:-n2 --max-worker-restart=5} oracle: ORACLE={env:TOX_ORACLE:--db oracle} @@ -105,6 +105,8 @@ setenv= py3{,5,6,7,8,9}: PY_SPECIFIC=--exclude-tag memory-intensive --exclude-tag timing-intensive py2{,7}: PY_SPECIFIC=--exclude-tag memory-intensive --exclude-tag timing-intensive + memusage: PY_SPECIFIC= + mysql: EXTRA_MYSQL_DRIVERS={env:EXTRA_MYSQL_DRIVERS:--dbdriver mysqldb --dbdriver pymysql} # py3{,7,8,9,10,11}-mysql: EXTRA_MYSQL_DRIVERS={env:EXTRA_MYSQL_DRIVERS:--dbdriver mysqldb --dbdriver pymysql --dbdriver mariadbconnector --dbdriver asyncmy} From 3fd9c5c5ac2910cc08eb1284860d7300288daf47 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 7 Dec 2022 
15:54:59 -0500 Subject: [PATCH 466/632] Oracle COLUMN_VALUE is a column name, not a keyword Fixed issue in Oracle compiler where the syntax for :meth:`.FunctionElement.column_valued` was incorrect, rendering the name ``COLUMN_VALUE`` without qualifying the source table correctly. Fixes: #8945 Change-Id: Ia04bbdc68168e78b67a74bb3834a63f5d5000627 (cherry picked from commit 655be0237367462a01a9c86cdef9e9afab06d6d6) --- doc/build/changelog/unreleased_14/8827.rst | 13 ++++++----- doc/build/changelog/unreleased_14/8945.rst | 8 +++++++ doc/build/tutorial/data_select.rst | 2 +- lib/sqlalchemy/dialects/oracle/base.py | 2 +- test/base/test_tutorials.py | 1 + test/dialect/oracle/test_compiler.py | 26 ++++++++++++++++++++-- test/dialect/oracle/test_dialect.py | 21 ++++++++++++----- 7 files changed, 59 insertions(+), 14 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8945.rst diff --git a/doc/build/changelog/unreleased_14/8827.rst b/doc/build/changelog/unreleased_14/8827.rst index 677277e45d7..2201cefb5b3 100644 --- a/doc/build/changelog/unreleased_14/8827.rst +++ b/doc/build/changelog/unreleased_14/8827.rst @@ -2,8 +2,11 @@ :tags: bug, sql :tickets: 8827 - Fixed a series of issues regarding positionally rendered bound parameters, - such as those used for SQLite, asyncpg, MySQL and others. Some compiled - forms would not maintain the order of parameters correctly, such as the - PostgreSQL ``regexp_replace()`` function as well as within the "nesting" - feature of the :class:`.CTE` construct first introduced in :ticket:`4123`. + Fixed a series of issues regarding the position and sometimes the identity + of rendered bound parameters, such as those used for SQLite, asyncpg, + MySQL, Oracle and others. 
Some compiled forms would not maintain the order + of parameters correctly, such as the PostgreSQL ``regexp_replace()`` + function, the "nesting" feature of the :class:`.CTE` construct first + introduced in :ticket:`4123`, and selectable tables formed by using the + :meth:`.FunctionElement.column_valued` method with Oracle. + diff --git a/doc/build/changelog/unreleased_14/8945.rst b/doc/build/changelog/unreleased_14/8945.rst new file mode 100644 index 00000000000..e1b4bd6935f --- /dev/null +++ b/doc/build/changelog/unreleased_14/8945.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, oracle + :tickets: 8945 + :versions: 2.0.0b5 + + Fixed issue in Oracle compiler where the syntax for + :meth:`.FunctionElement.column_valued` was incorrect, rendering the name + ``COLUMN_VALUE`` without qualifying the source table correctly. diff --git a/doc/build/tutorial/data_select.rst b/doc/build/tutorial/data_select.rst index 9b0b887da15..b34ab648cd3 100644 --- a/doc/build/tutorial/data_select.rst +++ b/doc/build/tutorial/data_select.rst @@ -1737,7 +1737,7 @@ it is usable for custom SQL functions:: >>> from sqlalchemy.dialects import oracle >>> stmt = select(func.scalar_strings(5).column_valued("s")) >>> print(stmt.compile(dialect=oracle.dialect())) - SELECT COLUMN_VALUE s + SELECT s.COLUMN_VALUE FROM TABLE (scalar_strings(:scalar_strings_1)) s diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py index 417ab84b7b7..934d4c719e2 100644 --- a/lib/sqlalchemy/dialects/oracle/base.py +++ b/lib/sqlalchemy/dialects/oracle/base.py @@ -948,7 +948,7 @@ def visit_table_valued_column(self, element, **kw): text = super(OracleCompiler, self).visit_table_valued_column( element, **kw ) - text = "COLUMN_VALUE " + text + text = text + ".COLUMN_VALUE" return text def default_from(self): diff --git a/test/base/test_tutorials.py b/test/base/test_tutorials.py index bde7baa7505..05d884b4da6 100644 --- a/test/base/test_tutorials.py +++ b/test/base/test_tutorials.py @@ 
-14,6 +14,7 @@ class DocTest(fixtures.TestBase): __requires__ = ("python3",) + __only_on__ = "sqlite+pysqlite" def _setup_logger(self): rootlogger = logging.getLogger("sqlalchemy.engine.Engine") diff --git a/test/dialect/oracle/test_compiler.py b/test/dialect/oracle/test_compiler.py index 2c586990813..08b68f0f030 100644 --- a/test/dialect/oracle/test_compiler.py +++ b/test/dialect/oracle/test_compiler.py @@ -1580,19 +1580,41 @@ def test_scalar_alias_column(self): stmt = select(fn.alias().column) self.assert_compile( stmt, - "SELECT COLUMN_VALUE anon_1 " + "SELECT anon_1.COLUMN_VALUE " "FROM TABLE (scalar_strings(:scalar_strings_1)) anon_1", ) + def test_scalar_alias_multi_columns(self): + fn1 = func.scalar_strings(5) + fn2 = func.scalar_strings(3) + stmt = select(fn1.alias().column, fn2.alias().column) + self.assert_compile( + stmt, + "SELECT anon_1.COLUMN_VALUE, anon_2.COLUMN_VALUE FROM TABLE " + "(scalar_strings(:scalar_strings_1)) anon_1, " + "TABLE (scalar_strings(:scalar_strings_2)) anon_2", + ) + def test_column_valued(self): fn = func.scalar_strings(5) stmt = select(fn.column_valued()) self.assert_compile( stmt, - "SELECT COLUMN_VALUE anon_1 " + "SELECT anon_1.COLUMN_VALUE " "FROM TABLE (scalar_strings(:scalar_strings_1)) anon_1", ) + def test_multi_column_valued(self): + fn1 = func.scalar_strings(5) + fn2 = func.scalar_strings(3) + stmt = select(fn1.column_valued(), fn2.column_valued().label("x")) + self.assert_compile( + stmt, + "SELECT anon_1.COLUMN_VALUE, anon_2.COLUMN_VALUE AS x FROM " + "TABLE (scalar_strings(:scalar_strings_1)) anon_1, " + "TABLE (scalar_strings(:scalar_strings_2)) anon_2", + ) + def test_table_valued(self): fn = func.three_pairs().table_valued("string1", "string2") stmt = select(fn.c.string1, fn.c.string2) diff --git a/test/dialect/oracle/test_dialect.py b/test/dialect/oracle/test_dialect.py index f494b59aeff..8a388889321 100644 --- a/test/dialect/oracle/test_dialect.py +++ b/test/dialect/oracle/test_dialect.py @@ -1,5 +1,6 @@ # 
coding: utf-8 +import itertools import re from sqlalchemy import bindparam @@ -1058,7 +1059,7 @@ def scalar_strings(self, connection): connection.exec_driver_sql( r""" CREATE OR REPLACE FUNCTION scalar_strings ( - count_in IN INTEGER) + count_in IN INTEGER, string_in IN VARCHAR2) RETURN strings_t AUTHID DEFINER IS @@ -1068,7 +1069,7 @@ def scalar_strings(self, connection): FOR indx IN 1 .. count_in LOOP - l_strings (indx) := 'some string'; + l_strings (indx) := string_in; END LOOP; RETURN l_strings; @@ -1118,7 +1119,8 @@ def two_strings(self, connection): def test_scalar_strings_control(self, scalar_strings, connection): result = ( connection.exec_driver_sql( - "SELECT COLUMN_VALUE my_string FROM TABLE (scalar_strings (5))" + "SELECT COLUMN_VALUE my_string FROM TABLE " + "(scalar_strings (5, 'some string'))" ) .scalars() .all() @@ -1129,7 +1131,7 @@ def test_scalar_strings_named_control(self, scalar_strings, connection): result = ( connection.exec_driver_sql( "SELECT COLUMN_VALUE anon_1 " - "FROM TABLE (scalar_strings (5)) anon_1" + "FROM TABLE (scalar_strings (5, 'some string')) anon_1" ) .scalars() .all() @@ -1137,7 +1139,7 @@ def test_scalar_strings_named_control(self, scalar_strings, connection): eq_(result, ["some string"] * 5) def test_scalar_strings(self, scalar_strings, connection): - fn = func.scalar_strings(5) + fn = func.scalar_strings(5, "some string") result = connection.execute(select(fn.column_valued())).scalars().all() eq_(result, ["some string"] * 5) @@ -1152,6 +1154,15 @@ def test_two_strings(self, two_strings, connection): result = connection.execute(select(fn.c.string1, fn.c.string2)).all() eq_(result, [("a", "b"), ("c", "d"), ("e", "f")]) + def test_two_independent_tables(self, scalar_strings, connection): + fn1 = func.scalar_strings(5, "string one").column_valued() + fn2 = func.scalar_strings(3, "string two").column_valued() + result = connection.execute(select(fn1, fn2).where(fn1 != fn2)).all() + eq_( + result, + 
list(itertools.product(["string one"] * 5, ["string two"] * 3)), + ) + class OptimizedFetchLimitOffsetTest(test_select.FetchLimitOffsetTest): __only_on__ = "oracle" From c68f6b28ed1294b955d18707947f44951a1c482e Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Thu, 8 Dec 2022 19:34:49 +0100 Subject: [PATCH 467/632] Better syncronize async result docs with plain ones. Removed non-functional method ``merge`` from :class:`_asyncio.AsyncResult`. This method was non-functional and non-testes since the first introduction of asyncio in SQLAlchemy. Fixes: #7158 Fixes: #8952 Change-Id: Ibc3d17be8a8b7cab9bf2074f0408f74b4c4b161d (cherry picked from commit ab8a21c613fb6c69b07f053e4622a4426b2e9ef0) --- .gitignore | 2 + doc/build/changelog/unreleased_14/8952.rst | 8 + doc/build/core/connections.rst | 3 +- doc/build/orm/extensions/asyncio.rst | 3 + lib/sqlalchemy/engine/result.py | 279 +++++++++++---------- lib/sqlalchemy/ext/asyncio/result.py | 148 +++++------ 6 files changed, 219 insertions(+), 224 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8952.rst diff --git a/.gitignore b/.gitignore index 01d436800c5..329aa3577a8 100644 --- a/.gitignore +++ b/.gitignore @@ -38,5 +38,7 @@ test/test_schema.db /querytest.db /.pytest_cache /db_idents.txt +.DS_Store +.vs # items that only belong in the 2.0 branch /lib/sqlalchemy/cyextension diff --git a/doc/build/changelog/unreleased_14/8952.rst b/doc/build/changelog/unreleased_14/8952.rst new file mode 100644 index 00000000000..bb40306b598 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8952.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, asyncio + :tickets: 8952 + :versions: 2.0.0b5 + + Removed non-functional method ``merge`` from :class:`_asyncio.AsyncResult`. + This method was non-functional and non-tested since the first introduction + of asyncio in SQLAlchemy. 
diff --git a/doc/build/core/connections.rst b/doc/build/core/connections.rst index 9481d9d4e41..8c3039df0e6 100644 --- a/doc/build/core/connections.rst +++ b/doc/build/core/connections.rst @@ -2227,7 +2227,7 @@ Connection / Engine API :inherited-members: -Result Set API +Result Set API ================= .. autoclass:: BaseCursorResult @@ -2280,4 +2280,3 @@ Result Set API .. autoclass:: RowMapping :members: - diff --git a/doc/build/orm/extensions/asyncio.rst b/doc/build/orm/extensions/asyncio.rst index 9ae1dfc2378..4bf087e0583 100644 --- a/doc/build/orm/extensions/asyncio.rst +++ b/doc/build/orm/extensions/asyncio.rst @@ -857,12 +857,15 @@ cursor. .. autoclass:: AsyncResult :members: + :inherited-members: .. autoclass:: AsyncScalarResult :members: + :inherited-members: .. autoclass:: AsyncMappingResult :members: + :inherited-members: ORM Session API Documentation ----------------------------- diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py index 898d3d88cd5..8eb1bedf891 100644 --- a/lib/sqlalchemy/engine/result.py +++ b/lib/sqlalchemy/engine/result.py @@ -690,7 +690,7 @@ class _WithKeys(object): # py2k does not allow overriding the __doc__ attribute. def keys(self): """Return an iterable view which yields the string keys that would - be represented by each :class:`.Row`. + be represented by each :class:`_engine.Row`. The keys can represent the labels of the columns returned by a core statement or the names of the orm classes returned by an orm @@ -711,20 +711,21 @@ def keys(self): class Result(_WithKeys, ResultInternal): """Represent a set of database results. - .. versionadded:: 1.4 The :class:`.Result` object provides a completely - updated usage model and calling facade for SQLAlchemy Core and - SQLAlchemy ORM. In Core, it forms the basis of the - :class:`.CursorResult` object which replaces the previous - :class:`.ResultProxy` interface. 
When using the ORM, a higher level - object called :class:`.ChunkedIteratorResult` is normally used. + .. versionadded:: 1.4 The :class:`_engine.Result` object provides a + completely updated usage model and calling facade for SQLAlchemy + Core and SQLAlchemy ORM. In Core, it forms the basis of the + :class:`_engine.CursorResult` object which replaces the previous + :class:`_engine.ResultProxy` interface. When using the ORM, a + higher level object called :class:`_engine.ChunkedIteratorResult` + is normally used. .. note:: In SQLAlchemy 1.4 and above, this object is used for ORM results returned by :meth:`_orm.Session.execute`, which can yield instances of ORM mapped objects either individually or within - tuple-like rows. Note that the :class:`_result.Result` object does not + tuple-like rows. Note that the :class:`_engine.Result` object does not deduplicate instances or rows automatically as is the case with the legacy :class:`_orm.Query` object. For in-Python de-duplication of - instances or rows, use the :meth:`_result.Result.unique` modifier + instances or rows, use the :meth:`_engine.Result.unique` modifier method. .. seealso:: @@ -750,7 +751,7 @@ def _soft_close(self, hard=False): raise NotImplementedError() def close(self): - """close this :class:`_result.Result`. + """close this :class:`_engine.Result`. The behavior of this method is implementation specific, and is not implemented by default. The method should generally end @@ -759,7 +760,7 @@ def close(self): :class:`.ResourceClosedError`. .. versionadded:: 1.4.27 - ``.close()`` was previously not generally - available for all :class:`_result.Result` classes, instead only + available for all :class:`_engine.Result` classes, instead only being available on the :class:`_engine.CursorResult` returned for Core statement executions. 
As most other result objects, namely the ones used by the ORM, are proxying a :class:`_engine.CursorResult` @@ -771,6 +772,19 @@ def close(self): """ self._soft_close(hard=True) + @property + def _soft_closed(self): + raise NotImplementedError() + + @property + def closed(self): + """return ``True`` if this :class:`_engine.Result` reports .closed + + .. versionadded:: 1.4.43 + + """ + raise NotImplementedError() + @_generative def yield_per(self, num): """Configure the row-fetching strategy to fetch ``num`` rows at a time. @@ -822,19 +836,6 @@ def yield_per(self, num): """ self._yield_per = num - @property - def _soft_closed(self): - raise NotImplementedError() - - @property - def closed(self): - """return True if this :class:`.Result` reports .closed - - .. versionadded:: 1.4.43 - - """ - raise NotImplementedError() - @_generative def unique(self, strategy=None): """Apply unique filtering to the objects returned by this @@ -914,7 +915,7 @@ def columns(self, *col_expressions): return self._column_slices(col_expressions) def scalars(self, index=0): - """Return a :class:`_result.ScalarResult` filtering object which + """Return a :class:`_engine.ScalarResult` filtering object which will return single elements rather than :class:`_row.Row` objects. E.g.:: @@ -923,24 +924,24 @@ def scalars(self, index=0): >>> result.scalars().all() [1, 2, 3] - When results are fetched from the :class:`_result.ScalarResult` + When results are fetched from the :class:`_engine.ScalarResult` filtering object, the single column-row that would be returned by the - :class:`_result.Result` is instead returned as the column's value. + :class:`_engine.Result` is instead returned as the column's value. .. versionadded:: 1.4 :param index: integer or row key indicating the column to be fetched from each row, defaults to ``0`` indicating the first column. - :return: a new :class:`_result.ScalarResult` filtering object referring - to this :class:`_result.Result` object. 
+ :return: a new :class:`_engine.ScalarResult` filtering object referring + to this :class:`_engine.Result` object. """ return ScalarResult(self, index) def _getter(self, key, raiseerr=True): """return a callable that will retrieve the given key from a - :class:`.Row`. + :class:`_engine.Row`. """ if self._source_supports_scalars: @@ -951,7 +952,7 @@ def _getter(self, key, raiseerr=True): def _tuple_getter(self, keys): """return a callable that will retrieve the given keys from a - :class:`.Row`. + :class:`_engine.Row`. """ if self._source_supports_scalars: @@ -962,15 +963,16 @@ def _tuple_getter(self, keys): def mappings(self): """Apply a mappings filter to returned rows, returning an instance of - :class:`_result.MappingResult`. + :class:`_engine.MappingResult`. When this filter is applied, fetching rows will return - :class:`.RowMapping` objects instead of :class:`.Row` objects. + :class:`_engine.RowMapping` objects instead of :class:`_engine.Row` + objects. .. versionadded:: 1.4 - :return: a new :class:`_result.MappingResult` filtering object - referring to this :class:`_result.Result` object. + :return: a new :class:`_engine.MappingResult` filtering object + referring to this :class:`_engine.Result` object. """ @@ -979,7 +981,7 @@ def mappings(self): def _raw_row_iterator(self): """Return a safe iterator that yields raw row data. - This is used by the :meth:`._engine.Result.merge` method + This is used by the :meth:`_engine.Result.merge` method to merge multiple compatible results together. """ @@ -1053,7 +1055,6 @@ def partitions(self, size=None): :ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel` - """ getter = self._manyrow_getter @@ -1082,8 +1083,8 @@ def fetchone(self): :meth:`_engine.Result.first` method. To iterate through all rows, iterate the :class:`_engine.Result` object directly. - :return: a :class:`.Row` object if no filters are applied, or None - if no rows remain. 
+ :return: a :class:`_engine.Row` object if no filters are applied, + or ``None`` if no rows remain. """ row = self._onerow_getter(self) @@ -1100,10 +1101,14 @@ def fetchmany(self, size=None): This method is provided for backwards compatibility with SQLAlchemy 1.x.x. - To fetch rows in groups, use the :meth:`._result.Result.partitions` + To fetch rows in groups, use the :meth:`_engine.Result.partitions` method. - :return: a list of :class:`.Row` objects. + :return: a list of :class:`_engine.Row` objects. + + .. seealso:: + + :meth:`_engine.Result.partitions` """ @@ -1117,25 +1122,28 @@ def all(self): .. versionadded:: 1.4 - :return: a list of :class:`.Row` objects. + :return: a list of :class:`_engine.Row` objects. """ return self._allrows() def first(self): - """Fetch the first row or None if no row is present. + """Fetch the first row or ``None`` if no row is present. Closes the result set and discards remaining rows. .. note:: This method returns one **row**, e.g. tuple, by default. To return exactly one single scalar value, that is, the first - column of the first row, use the :meth:`.Result.scalar` method, - or combine :meth:`.Result.scalars` and :meth:`.Result.first`. + column of the first row, use the + :meth:`_engine.Result.scalar` method, + or combine :meth:`_engine.Result.scalars` and + :meth:`_engine.Result.first`. Additionally, in contrast to the behavior of the legacy ORM :meth:`_orm.Query.first` method, **no limit is applied** to the - SQL query which was invoked to produce this :class:`_engine.Result`; + SQL query which was invoked to produce this + :class:`_engine.Result`; for a DBAPI driver that buffers results in memory before yielding rows, all rows will be sent to the Python process and all but the first row will be discarded. @@ -1144,14 +1152,14 @@ def first(self): :ref:`migration_20_unify_select` - :return: a :class:`.Row` object, or None + :return: a :class:`_engine.Row` object, or None if no rows remain. .. 
seealso:: - :meth:`_result.Result.scalar` + :meth:`_engine.Result.scalar` - :meth:`_result.Result.one` + :meth:`_engine.Result.one` """ @@ -1168,15 +1176,16 @@ def one_or_none(self): .. versionadded:: 1.4 - :return: The first :class:`.Row` or None if no row is available. + :return: The first :class:`_engine.Row` or ``None`` if no row + is available. :raises: :class:`.MultipleResultsFound` .. seealso:: - :meth:`_result.Result.first` + :meth:`_engine.Result.first` - :meth:`_result.Result.one` + :meth:`_engine.Result.one` """ return self._only_one_row( @@ -1186,14 +1195,14 @@ def one_or_none(self): def scalar_one(self): """Return exactly one scalar result or raise an exception. - This is equivalent to calling :meth:`.Result.scalars` and then - :meth:`.Result.one`. + This is equivalent to calling :meth:`_engine.Result.scalars` and + then :meth:`_engine.Result.one`. .. seealso:: - :meth:`.Result.one` + :meth:`_engine.Result.one` - :meth:`.Result.scalars` + :meth:`_engine.Result.scalars` """ return self._only_one_row( @@ -1201,16 +1210,16 @@ def scalar_one(self): ) def scalar_one_or_none(self): - """Return exactly one or no scalar result. + """Return exactly one scalar result or ``None``. - This is equivalent to calling :meth:`.Result.scalars` and then - :meth:`.Result.one_or_none`. + This is equivalent to calling :meth:`_engine.Result.scalars` and + then :meth:`_engine.Result.one_or_none`. .. seealso:: - :meth:`.Result.one_or_none` + :meth:`_engine.Result.one_or_none` - :meth:`.Result.scalars` + :meth:`_engine.Result.scalars` """ return self._only_one_row( @@ -1226,22 +1235,24 @@ def one(self): .. note:: This method returns one **row**, e.g. tuple, by default. To return exactly one single scalar value, that is, the first - column of the first row, use the :meth:`.Result.scalar_one` method, - or combine :meth:`.Result.scalars` and :meth:`.Result.one`. 
+ column of the first row, use the + :meth:`_engine.Result.scalar_one` method, or combine + :meth:`_engine.Result.scalars` and + :meth:`_engine.Result.one`. .. versionadded:: 1.4 - :return: The first :class:`.Row`. + :return: The first :class:`_engine.Row`. :raises: :class:`.MultipleResultsFound`, :class:`.NoResultFound` .. seealso:: - :meth:`_result.Result.first` + :meth:`_engine.Result.first` - :meth:`_result.Result.one_or_none` + :meth:`_engine.Result.one_or_none` - :meth:`_result.Result.scalar_one` + :meth:`_engine.Result.scalar_one` """ return self._only_one_row( @@ -1251,7 +1262,7 @@ def one(self): def scalar(self): """Fetch the first column of the first row, and close the result set. - Returns None if there are no rows to fetch. + Returns ``None`` if there are no rows to fetch. No validation is performed to test if additional rows remain. @@ -1259,7 +1270,7 @@ def scalar(self): e.g. the :meth:`_engine.CursorResult.close` method will have been called. - :return: a Python scalar value , or None if no rows remain. + :return: a Python scalar value, or ``None`` if no rows remain. """ return self._only_one_row( @@ -1268,7 +1279,7 @@ def scalar(self): def freeze(self): """Return a callable object that will produce copies of this - :class:`.Result` when invoked. + :class:`_engine.Result` when invoked. The callable object returned is an instance of :class:`_engine.FrozenResult`. @@ -1290,7 +1301,7 @@ def freeze(self): return FrozenResult(self) def merge(self, *others): - """Merge this :class:`.Result` with other compatible result + """Merge this :class:`_engine.Result` with other compatible result objects. The object returned is an instance of :class:`_engine.MergedResult`, @@ -1308,11 +1319,11 @@ def merge(self, *others): class FilterResult(ResultInternal): """A wrapper for a :class:`_engine.Result` that returns objects other than - :class:`_result.Row` objects, such as dictionaries or scalar objects. 
+ :class:`_engine.Row` objects, such as dictionaries or scalar objects. - :class:`.FilterResult` is the common base for additional result - APIs including :class:`.MappingResult`, :class:`.ScalarResult` - and :class:`.AsyncResult`. + :class:`_engine.FilterResult` is the common base for additional result + APIs including :class:`_engine.MappingResult`, + :class:`_engine.ScalarResult` and :class:`_engine.AsyncResult`. """ @@ -1348,7 +1359,8 @@ def _soft_closed(self): @property def closed(self): - """return True if the underlying result reports .closed + """Return ``True`` if the underlying :class:`_engine.Result` reports + closed .. versionadded:: 1.4.43 @@ -1356,7 +1368,7 @@ def closed(self): return self._real_result.closed # type: ignore def close(self): - """Close this :class:`.FilterResult`. + """Close this :class:`_engine.FilterResult`. .. versionadded:: 1.4.43 @@ -1381,16 +1393,16 @@ def _fetchmany_impl(self, size=None): class ScalarResult(FilterResult): - """A wrapper for a :class:`_result.Result` that returns scalar values + """A wrapper for a :class:`_engine.Result` that returns scalar values rather than :class:`_row.Row` values. - The :class:`_result.ScalarResult` object is acquired by calling the - :meth:`_result.Result.scalars` method. + The :class:`_engine.ScalarResult` object is acquired by calling the + :meth:`_engine.Result.scalars` method. - A special limitation of :class:`_result.ScalarResult` is that it has + A special limitation of :class:`_engine.ScalarResult` is that it has no ``fetchone()`` method; since the semantics of ``fetchone()`` are that the ``None`` value indicates no more results, this is not compatible - with :class:`_result.ScalarResult` since there is no way to distinguish + with :class:`_engine.ScalarResult` since there is no way to distinguish between ``None`` as a row value versus ``None`` as an indicator. Use ``next(result)`` to receive values individually. 
@@ -1423,8 +1435,8 @@ def unique(self, strategy=None): def partitions(self, size=None): """Iterate through sub-lists of elements of the size given. - Equivalent to :meth:`_result.Result.partitions` except that - scalar values, rather than :class:`_result.Row` objects, + Equivalent to :meth:`_engine.Result.partitions` except that + scalar values, rather than :class:`_engine.Row` objects, are returned. """ @@ -1446,8 +1458,8 @@ def fetchall(self): def fetchmany(self, size=None): """Fetch many objects. - Equivalent to :meth:`_result.Result.fetchmany` except that - scalar values, rather than :class:`_result.Row` objects, + Equivalent to :meth:`_engine.Result.fetchmany` except that + scalar values, rather than :class:`_engine.Row` objects, are returned. """ @@ -1456,8 +1468,8 @@ def fetchmany(self, size=None): def all(self): """Return all scalar values in a list. - Equivalent to :meth:`_result.Result.all` except that - scalar values, rather than :class:`_result.Row` objects, + Equivalent to :meth:`_engine.Result.all` except that + scalar values, rather than :class:`_engine.Row` objects, are returned. """ @@ -1475,10 +1487,10 @@ def next(self): # noqa return self._next_impl() def first(self): - """Fetch the first object or None if no object is present. + """Fetch the first object or ``None`` if no object is present. - Equivalent to :meth:`_result.Result.first` except that - scalar values, rather than :class:`_result.Row` objects, + Equivalent to :meth:`_engine.Result.first` except that + scalar values, rather than :class:`_engine.Row` objects, are returned. @@ -1490,8 +1502,8 @@ def first(self): def one_or_none(self): """Return at most one object or raise an exception. - Equivalent to :meth:`_result.Result.one_or_none` except that - scalar values, rather than :class:`_result.Row` objects, + Equivalent to :meth:`_engine.Result.one_or_none` except that + scalar values, rather than :class:`_engine.Row` objects, are returned. 
""" @@ -1502,8 +1514,8 @@ def one_or_none(self): def one(self): """Return exactly one object or raise an exception. - Equivalent to :meth:`_result.Result.one` except that - scalar values, rather than :class:`_result.Row` objects, + Equivalent to :meth:`_engine.Result.one` except that + scalar values, rather than :class:`_engine.Row` objects, are returned. """ @@ -1549,9 +1561,9 @@ def columns(self, *col_expressions): def partitions(self, size=None): """Iterate through sub-lists of elements of the size given. - Equivalent to :meth:`_result.Result.partitions` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + Equivalent to :meth:`_engine.Result.partitions` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ @@ -1572,9 +1584,9 @@ def fetchall(self): def fetchone(self): """Fetch one object. - Equivalent to :meth:`_result.Result.fetchone` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + Equivalent to :meth:`_engine.Result.fetchone` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ @@ -1587,9 +1599,9 @@ def fetchone(self): def fetchmany(self, size=None): """Fetch many objects. - Equivalent to :meth:`_result.Result.fetchmany` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + Equivalent to :meth:`_engine.Result.fetchmany` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ @@ -1598,9 +1610,9 @@ def fetchmany(self, size=None): def all(self): """Return all scalar values in a list. - Equivalent to :meth:`_result.Result.all` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + Equivalent to :meth:`_engine.Result.all` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. 
""" @@ -1618,11 +1630,11 @@ def next(self): # noqa return self._next_impl() def first(self): - """Fetch the first object or None if no object is present. + """Fetch the first object or ``None`` if no object is present. - Equivalent to :meth:`_result.Result.first` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + Equivalent to :meth:`_engine.Result.first` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ @@ -1633,9 +1645,9 @@ def first(self): def one_or_none(self): """Return at most one object or raise an exception. - Equivalent to :meth:`_result.Result.one_or_none` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + Equivalent to :meth:`_engine.Result.one_or_none` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ return self._only_one_row( @@ -1645,9 +1657,9 @@ def one_or_none(self): def one(self): """Return exactly one object or raise an exception. - Equivalent to :meth:`_result.Result.one` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + Equivalent to :meth:`_engine.Result.one` except that + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ return self._only_one_row( @@ -1656,15 +1668,15 @@ def one(self): class FrozenResult(object): - """Represents a :class:`.Result` object in a "frozen" state suitable + """Represents a :class:`_engine.Result` object in a "frozen" state suitable for caching. The :class:`_engine.FrozenResult` object is returned from the :meth:`_engine.Result.freeze` method of any :class:`_engine.Result` object. 
- A new iterable :class:`.Result` object is generated from a fixed - set of data each time the :class:`.FrozenResult` is invoked as + A new iterable :class:`_engine.Result` object is generated from a fixed + set of data each time the :class:`_engine.FrozenResult` is invoked as a callable:: @@ -1730,8 +1742,8 @@ def __call__(self): class IteratorResult(Result): - """A :class:`.Result` that gets data from a Python iterator of - :class:`.Row` objects. + """A :class:`_engine.Result` that gets data from a Python iterator of + :class:`_engine.Row` objects or similar row-like data. .. versionadded:: 1.4 @@ -1752,6 +1764,16 @@ def __init__( self.raw = raw self._source_supports_scalars = _source_supports_scalars + @property + def closed(self): + """Return ``True`` if this :class:`_engine.IteratorResult` has + been closed + + .. versionadded:: 1.4.43 + + """ + return self._hard_closed + def _soft_close(self, hard=False, **kw): if hard: self._hard_closed = True @@ -1761,15 +1783,6 @@ def _soft_close(self, hard=False, **kw): self._reset_memoizations() self._soft_closed = True - @property - def closed(self): - """return True if this :class:`.IteratorResult` has been closed - - .. versionadded:: 1.4.43 - - """ - return self._hard_closed - def _raise_hard_closed(self): raise exc.ResourceClosedError("This result object is closed.") @@ -1813,8 +1826,8 @@ def null_result(): class ChunkedIteratorResult(IteratorResult): - """An :class:`.IteratorResult` that works from an iterator-producing - callable. + """An :class:`_engine.IteratorResult` that works from an + iterator-producing callable. The given ``chunks`` argument is a function that is given a number of rows to return in each chunk, or ``None`` for all rows. The function should diff --git a/lib/sqlalchemy/ext/asyncio/result.py b/lib/sqlalchemy/ext/asyncio/result.py index a77b6a8c943..15553948ba6 100644 --- a/lib/sqlalchemy/ext/asyncio/result.py +++ b/lib/sqlalchemy/ext/asyncio/result.py @@ -9,9 +9,9 @@ from . 
import exc as async_exc from ...engine.result import _NO_ROW +from ...engine.result import _WithKeys from ...engine.result import FilterResult from ...engine.result import FrozenResult -from ...engine.result import MergedResult from ...sql.base import _generative from ...util.concurrency import greenlet_spawn @@ -23,7 +23,7 @@ async def close(self): await greenlet_spawn(self._real_result.close) -class AsyncResult(AsyncCommon): +class AsyncResult(_WithKeys, AsyncCommon): """An asyncio wrapper around a :class:`_result.Result` object. The :class:`_asyncio.AsyncResult` only applies to statement executions that @@ -57,13 +57,6 @@ def __init__(self, real_result): "_row_getter", real_result.__dict__["_row_getter"] ) - def keys(self): - """Return the :meth:`_engine.Result.keys` collection from the - underlying :class:`_engine.Result`. - - """ - return self._metadata.keys - @_generative def unique(self, strategy=None): """Apply unique filtering to the objects returned by this @@ -72,7 +65,6 @@ def unique(self, strategy=None): Refer to :meth:`_engine.Result.unique` in the synchronous SQLAlchemy API for a complete behavioral description. - """ self._unique_filter_state = (set(), strategy) @@ -82,7 +74,6 @@ def columns(self, *col_expressions): Refer to :meth:`_engine.Result.columns` in the synchronous SQLAlchemy API for a complete behavioral description. - """ return self._column_slices(col_expressions) @@ -97,9 +88,8 @@ async def scroll_results(connection): async for partition in result.partitions(100): print("list of rows: %s" % partition) - .. seealso:: - - :meth:`_engine.Result.partitions` + Refer to :meth:`_engine.Result.partitions` in the synchronous + SQLAlchemy API for a complete behavioral description. """ @@ -121,11 +111,11 @@ async def fetchone(self): SQLAlchemy 1.x.x. To fetch the first row of a result only, use the - :meth:`_engine.Result.first` method. To iterate through all - rows, iterate the :class:`_engine.Result` object directly. 
+ :meth:`_asyncio.AsyncResult.first` method. To iterate through all + rows, iterate the :class:`_asyncio.AsyncResult` object directly. - :return: a :class:`.Row` object if no filters are applied, or None - if no rows remain. + :return: a :class:`_engine.Row` object if no filters are applied, + or ``None`` if no rows remain. """ row = await greenlet_spawn(self._onerow_getter, self) @@ -145,7 +135,7 @@ async def fetchmany(self, size=None): To fetch rows in groups, use the :meth:`._asyncio.AsyncResult.partitions` method. - :return: a list of :class:`.Row` objects. + :return: a list of :class:`_engine.Row` objects. .. seealso:: @@ -161,7 +151,7 @@ async def all(self): Closes the result set after invocation. Subsequent invocations will return an empty list. - :return: a list of :class:`.Row` objects. + :return: a list of :class:`_engine.Row` objects. """ @@ -178,17 +168,30 @@ async def __anext__(self): return row async def first(self): - """Fetch the first row or None if no row is present. + """Fetch the first row or ``None`` if no row is present. Closes the result set and discards remaining rows. - .. note:: This method returns one **row**, e.g. tuple, by default. To - return exactly one single scalar value, that is, the first column of - the first row, use the :meth:`_asyncio.AsyncResult.scalar` method, + .. note:: This method returns one **row**, e.g. tuple, by default. + To return exactly one single scalar value, that is, the first + column of the first row, use the + :meth:`_asyncio.AsyncResult.scalar` method, or combine :meth:`_asyncio.AsyncResult.scalars` and :meth:`_asyncio.AsyncResult.first`. 
- :return: a :class:`.Row` object, or None + Additionally, in contrast to the behavior of the legacy ORM + :meth:`_orm.Query.first` method, **no limit is applied** to the + SQL query which was invoked to produce this + :class:`_asyncio.AsyncResult`; + for a DBAPI driver that buffers results in memory before yielding + rows, all rows will be sent to the Python process and all but + the first row will be discarded. + + .. seealso:: + + :ref:`migration_20_unify_select` + + :return: a :class:`_engine.Row` object, or None if no rows remain. .. seealso:: @@ -209,7 +212,8 @@ async def one_or_none(self): .. versionadded:: 1.4 - :return: The first :class:`.Row` or None if no row is available. + :return: The first :class:`_engine.Row` or ``None`` if no row + is available. :raises: :class:`.MultipleResultsFound` @@ -238,7 +242,7 @@ async def scalar_one(self): return await greenlet_spawn(self._only_one_row, True, True, True) async def scalar_one_or_none(self): - """Return exactly one or no scalar result. + """Return exactly one scalar result or ``None``. This is equivalent to calling :meth:`_asyncio.AsyncResult.scalars` and then :meth:`_asyncio.AsyncResult.one_or_none`. @@ -268,7 +272,7 @@ async def one(self): .. versionadded:: 1.4 - :return: The first :class:`.Row`. + :return: The first :class:`_engine.Row`. :raises: :class:`.MultipleResultsFound`, :class:`.NoResultFound` @@ -286,7 +290,7 @@ async def one(self): async def scalar(self): """Fetch the first column of the first row, and close the result set. - Returns None if there are no rows to fetch. + Returns ``None`` if there are no rows to fetch. No validation is performed to test if additional rows remain. @@ -294,7 +298,7 @@ async def scalar(self): e.g. the :meth:`_engine.CursorResult.close` method will have been called. - :return: a Python scalar value , or None if no rows remain. + :return: a Python scalar value, or ``None`` if no rows remain. 
""" return await greenlet_spawn(self._only_one_row, False, False, True) @@ -322,22 +326,6 @@ async def freeze(self): return await greenlet_spawn(FrozenResult, self) - def merge(self, *others): - """Merge this :class:`_asyncio.AsyncResult` with other compatible - result objects. - - The object returned is an instance of :class:`_engine.MergedResult`, - which will be composed of iterators from the given result - objects. - - The new result will use the metadata from this result object. - The subsequent result objects must be against an identical - set of result / cursor metadata, otherwise the behavior is - undefined. - - """ - return MergedResult(self._metadata, (self,) + others) - def scalars(self, index=0): """Return an :class:`_asyncio.AsyncScalarResult` filtering object which will return single elements rather than :class:`_row.Row` objects. @@ -359,10 +347,8 @@ def mappings(self): :class:`_asyncio.AsyncMappingResult`. When this filter is applied, fetching rows will return - :class:`.RowMapping` objects instead of :class:`.Row` objects. - - Refer to :meth:`_result.Result.mappings` in the synchronous - SQLAlchemy API for a complete behavioral description. + :class:`_engine.RowMapping` objects instead of :class:`_engine.Row` + objects. :return: a new :class:`_asyncio.AsyncMappingResult` filtering object referring to the underlying :class:`_result.Result` object. @@ -414,7 +400,7 @@ async def partitions(self, size=None): """Iterate through sub-lists of elements of the size given. Equivalent to :meth:`_asyncio.AsyncResult.partitions` except that - scalar values, rather than :class:`_result.Row` objects, + scalar values, rather than :class:`_engine.Row` objects, are returned. """ @@ -437,7 +423,7 @@ async def fetchmany(self, size=None): """Fetch many objects. Equivalent to :meth:`_asyncio.AsyncResult.fetchmany` except that - scalar values, rather than :class:`_result.Row` objects, + scalar values, rather than :class:`_engine.Row` objects, are returned. 
""" @@ -447,7 +433,7 @@ async def all(self): """Return all scalar values in a list. Equivalent to :meth:`_asyncio.AsyncResult.all` except that - scalar values, rather than :class:`_result.Row` objects, + scalar values, rather than :class:`_engine.Row` objects, are returned. """ @@ -464,10 +450,10 @@ async def __anext__(self): return row async def first(self): - """Fetch the first object or None if no object is present. + """Fetch the first object or ``None`` if no object is present. Equivalent to :meth:`_asyncio.AsyncResult.first` except that - scalar values, rather than :class:`_result.Row` objects, + scalar values, rather than :class:`_engine.Row` objects, are returned. """ @@ -477,7 +463,7 @@ async def one_or_none(self): """Return at most one object or raise an exception. Equivalent to :meth:`_asyncio.AsyncResult.one_or_none` except that - scalar values, rather than :class:`_result.Row` objects, + scalar values, rather than :class:`_engine.Row` objects, are returned. """ @@ -487,14 +473,14 @@ async def one(self): """Return exactly one object or raise an exception. Equivalent to :meth:`_asyncio.AsyncResult.one` except that - scalar values, rather than :class:`_result.Row` objects, + scalar values, rather than :class:`_engine.Row` objects, are returned. """ return await greenlet_spawn(self._only_one_row, True, True, False) -class AsyncMappingResult(AsyncCommon): +class AsyncMappingResult(_WithKeys, AsyncCommon): """A wrapper for a :class:`_asyncio.AsyncResult` that returns dictionary values rather than :class:`_engine.Row` values. @@ -519,21 +505,6 @@ def __init__(self, result): if result._source_supports_scalars: self._metadata = self._metadata._reduce([0]) - def keys(self): - """Return an iterable view which yields the string keys that would - be represented by each :class:`.Row`. 
- - The view also can be tested for key containment using the Python - ``in`` operator, which will test both for the string keys represented - in the view, as well as for alternate keys such as column objects. - - .. versionchanged:: 1.4 a key view object is returned rather than a - plain list. - - - """ - return self._metadata.keys - def unique(self, strategy=None): """Apply unique filtering to the objects returned by this :class:`_asyncio.AsyncMappingResult`. @@ -552,8 +523,8 @@ async def partitions(self, size=None): """Iterate through sub-lists of elements of the size given. Equivalent to :meth:`_asyncio.AsyncResult.partitions` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ @@ -575,8 +546,8 @@ async def fetchone(self): """Fetch one object. Equivalent to :meth:`_asyncio.AsyncResult.fetchone` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ @@ -590,8 +561,8 @@ async def fetchmany(self, size=None): """Fetch many objects. Equivalent to :meth:`_asyncio.AsyncResult.fetchmany` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ @@ -601,8 +572,8 @@ async def all(self): """Return all scalar values in a list. Equivalent to :meth:`_asyncio.AsyncResult.all` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ @@ -619,12 +590,11 @@ async def __anext__(self): return row async def first(self): - """Fetch the first object or None if no object is present. + """Fetch the first object or ``None`` if no object is present. 
Equivalent to :meth:`_asyncio.AsyncResult.first` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. - + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ return await greenlet_spawn(self._only_one_row, False, False, False) @@ -633,8 +603,8 @@ async def one_or_none(self): """Return at most one object or raise an exception. Equivalent to :meth:`_asyncio.AsyncResult.one_or_none` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ return await greenlet_spawn(self._only_one_row, True, False, False) @@ -643,8 +613,8 @@ async def one(self): """Return exactly one object or raise an exception. Equivalent to :meth:`_asyncio.AsyncResult.one` except that - mapping values, rather than :class:`_result.Row` objects, - are returned. + :class:`_engine.RowMapping` values, rather than :class:`_engine.Row` + objects, are returned. """ return await greenlet_spawn(self._only_one_row, True, True, False) From c60ff04d671558c02d8f8eb1ea6470f896538f78 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 9 Dec 2022 15:56:15 -0500 Subject: [PATCH 468/632] look out for extras=None in freeze Fixed issue where :meth:`_engine.Result.freeze` method would not work for textual SQL using either :func:`_sql.text` or :meth:`_engine.Connection.exec_driver_sql`. 
Fixes: #8963 Change-Id: Ia131c6ac41a4adf32eb1bf1abf23930ef395f16c (cherry picked from commit 44170aee47a021883c6244f702de2e4385a5cd1d) --- doc/build/changelog/unreleased_14/8963.rst | 8 ++++ lib/sqlalchemy/engine/result.py | 2 +- test/sql/test_resultset.py | 46 ++++++++++++++++++++++ 3 files changed, 55 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/8963.rst diff --git a/doc/build/changelog/unreleased_14/8963.rst b/doc/build/changelog/unreleased_14/8963.rst new file mode 100644 index 00000000000..54711af5960 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8963.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, engine + :tickets: 8963 + + Fixed issue where :meth:`_engine.Result.freeze` method would not work for + textual SQL using either :func:`_sql.text` or + :meth:`_engine.Connection.exec_driver_sql`. + diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py index 898d3d88cd5..5a69b0a4ef5 100644 --- a/lib/sqlalchemy/engine/result.py +++ b/lib/sqlalchemy/engine/result.py @@ -167,7 +167,7 @@ def __init__( if extra: recs_names = [ ( - (name,) + extras, + (name,) + (extras if extras else ()), (index, name, extras), ) for index, (name, extras) in enumerate(zip(self._keys, extra)) diff --git a/test/sql/test_resultset.py b/test/sql/test_resultset.py index 11d58a57a24..e1a414bd1b0 100644 --- a/test/sql/test_resultset.py +++ b/test/sql/test_resultset.py @@ -105,6 +105,52 @@ def define_tables(cls, metadata): Column("y", String(50)), ) + @testing.variation( + "type_", ["text", "driversql", "core", "textstar", "driverstar"] + ) + def test_freeze(self, type_, connection): + """test #8963""" + + users = self.tables.users + connection.execute( + users.insert(), + [ + dict(user_id=1, user_name="john"), + dict(user_id=2, user_name="jack"), + ], + ) + + if type_.core: + stmt = select(users).order_by(users.c.user_id) + else: + if "star" in type_.name: + stmt = "select * from users order by user_id" + else: + stmt = "select 
user_id, user_name from users order by user_id" + + if "text" in type_.name: + stmt = text(stmt) + + if "driver" in type_.name: + result = connection.exec_driver_sql(stmt) + else: + result = connection.execute(stmt) + + frozen = result.freeze() + + unfrozen = frozen() + eq_(unfrozen.keys(), ["user_id", "user_name"]) + eq_(unfrozen.all(), [(1, "john"), (2, "jack")]) + + unfrozen = frozen() + eq_( + unfrozen.mappings().all(), + [ + {"user_id": 1, "user_name": "john"}, + {"user_id": 2, "user_name": "jack"}, + ], + ) + def test_row_iteration(self, connection): users = self.tables.users From 8e83f34a0b4f17764f64479b774fb262e0b81910 Mon Sep 17 00:00:00 2001 From: Gord Thompson Date: Fri, 9 Dec 2022 09:55:34 -0700 Subject: [PATCH 469/632] Specify view columns in HasTableTest Fixes: #8960 Avoid test errors on databases that do not support CREATE VIEW vv AS SELECT * FROM Change-Id: Ic9e892aa4466030b9b325c11228dad15cf59a258 (cherry picked from commit 7e9b1450b6899c82c9362cbc92fcc0f01c97b043) --- lib/sqlalchemy/testing/suite/test_reflection.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/lib/sqlalchemy/testing/suite/test_reflection.py b/lib/sqlalchemy/testing/suite/test_reflection.py index 4e575046d37..f1b8d8aaf8a 100644 --- a/lib/sqlalchemy/testing/suite/test_reflection.py +++ b/lib/sqlalchemy/testing/suite/test_reflection.py @@ -76,15 +76,18 @@ def define_tables(cls, metadata): @classmethod def define_views(cls, metadata): - query = "CREATE VIEW vv AS SELECT * FROM test_table" + query = "CREATE VIEW vv AS SELECT id, data FROM test_table" event.listen(metadata, "after_create", DDL(query)) event.listen(metadata, "before_drop", DDL("DROP VIEW vv")) if testing.requires.schemas.enabled: - query = "CREATE VIEW %s.vv AS SELECT * FROM %s.test_table_s" % ( - config.test_schema, - config.test_schema, + query = ( + "CREATE VIEW %s.vv AS SELECT id, data FROM %s.test_table_s" + % ( + config.test_schema, + config.test_schema, + ) ) event.listen(metadata, 
"after_create", DDL(query)) event.listen( From 57866fb3b1ded4a8b357f4d8bd873cbed7e17afc Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 10 Dec 2022 13:46:43 -0500 Subject: [PATCH 470/632] changelog edits for 1.4.45 Change-Id: Ic54a46c5091eda1480ce80989075527c6b3a6d7e --- doc/build/changelog/unreleased_14/8748.rst | 2 +- doc/build/changelog/unreleased_14/8952.rst | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/build/changelog/unreleased_14/8748.rst b/doc/build/changelog/unreleased_14/8748.rst index 27e06792276..5d6704fd81b 100644 --- a/doc/build/changelog/unreleased_14/8748.rst +++ b/doc/build/changelog/unreleased_14/8748.rst @@ -1,5 +1,5 @@ .. change:: - :tags: bug postgresql + :tags: bug, postgresql :tickets: 8748 Made an adjustment to how the PostgreSQL dialect considers column types diff --git a/doc/build/changelog/unreleased_14/8952.rst b/doc/build/changelog/unreleased_14/8952.rst index bb40306b598..8be984be0f3 100644 --- a/doc/build/changelog/unreleased_14/8952.rst +++ b/doc/build/changelog/unreleased_14/8952.rst @@ -3,6 +3,6 @@ :tickets: 8952 :versions: 2.0.0b5 - Removed non-functional method ``merge`` from :class:`_asyncio.AsyncResult`. - This method was non-functional and non-tested since the first introduction - of asyncio in SQLAlchemy. + Removed non-functional ``merge()`` method from + :class:`_asyncio.AsyncResult`. This method has never worked and was + included with :class:`_asyncio.AsyncResult` in error. 
From b924ba949cb8aa1bd73f7438600caf92a3b732b3 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 10 Dec 2022 13:47:24 -0500 Subject: [PATCH 471/632] - 1.4.45 --- doc/build/changelog/changelog_14.rst | 134 ++++++++++++++++++++- doc/build/changelog/unreleased_14/8708.rst | 9 -- doc/build/changelog/unreleased_14/8748.rst | 7 -- doc/build/changelog/unreleased_14/8800.rst | 8 -- doc/build/changelog/unreleased_14/8804.rst | 7 -- doc/build/changelog/unreleased_14/8827.rst | 12 -- doc/build/changelog/unreleased_14/8862.rst | 24 ---- doc/build/changelog/unreleased_14/8866.rst | 8 -- doc/build/changelog/unreleased_14/8881.rst | 14 --- doc/build/changelog/unreleased_14/8903.rst | 7 -- doc/build/changelog/unreleased_14/8945.rst | 8 -- doc/build/changelog/unreleased_14/8952.rst | 8 -- doc/build/changelog/unreleased_14/8963.rst | 8 -- doc/build/conf.py | 4 +- 14 files changed, 135 insertions(+), 123 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/8708.rst delete mode 100644 doc/build/changelog/unreleased_14/8748.rst delete mode 100644 doc/build/changelog/unreleased_14/8800.rst delete mode 100644 doc/build/changelog/unreleased_14/8804.rst delete mode 100644 doc/build/changelog/unreleased_14/8827.rst delete mode 100644 doc/build/changelog/unreleased_14/8862.rst delete mode 100644 doc/build/changelog/unreleased_14/8866.rst delete mode 100644 doc/build/changelog/unreleased_14/8881.rst delete mode 100644 doc/build/changelog/unreleased_14/8903.rst delete mode 100644 doc/build/changelog/unreleased_14/8945.rst delete mode 100644 doc/build/changelog/unreleased_14/8952.rst delete mode 100644 doc/build/changelog/unreleased_14/8963.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 273947fcfaa..8fa037a695f 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,139 @@ This document details individual issue-level changes made throughout .. 
changelog:: :version: 1.4.45 - :include_notes_from: unreleased_14 + :released: December 10, 2022 + + .. change:: + :tags: bug, orm + :tickets: 8862 + + Fixed bug where :meth:`_orm.Session.merge` would fail to preserve the + current loaded contents of relationship attributes that were indicated with + the :paramref:`_orm.relationship.viewonly` parameter, thus defeating + strategies that use :meth:`_orm.Session.merge` to pull fully loaded objects + from caches and other similar techniques. In a related change, fixed issue + where an object that contains a loaded relationship that was nonetheless + configured as ``lazy='raise'`` on the mapping would fail when passed to + :meth:`_orm.Session.merge`; checks for "raise" are now suspended within + the merge process assuming the :paramref:`_orm.Session.merge.load` + parameter remains at its default of ``True``. + + Overall, this is a behavioral adjustment to a change introduced in the 1.4 + series as of :ticket:`4994`, which took "merge" out of the set of cascades + applied by default to "viewonly" relationships. As "viewonly" relationships + aren't persisted under any circumstances, allowing their contents to + transfer during "merge" does not impact the persistence behavior of the + target object. This allows :meth:`_orm.Session.merge` to correctly suit one + of its use cases, that of adding objects to a :class:`.Session` that were + loaded elsewhere, often for the purposes of restoring from a cache. + + + .. change:: + :tags: bug, orm + :tickets: 8881 + + Fixed issues in :func:`_orm.with_expression` where expressions that were + composed of columns that were referenced from the enclosing SELECT would + not render correct SQL in some contexts, in the case where the expression + had a label name that matched the attribute which used + :func:`_orm.query_expression`, even when :func:`_orm.query_expression` had + no default expression. 
For the moment, if the :func:`_orm.query_expression` + does have a default expression, that label name is still used for that + default, and an additional label with the same name will continue to be + ignored. Overall, this case is pretty thorny so further adjustments might + be warranted. + + .. change:: + :tags: bug, sqlite + :tickets: 8866 + + Backported a fix for SQLite reflection of unique constraints in attached + schemas, released in 2.0 as a small part of :ticket:`4379`. Previously, + unique constraints in attached schemas would be ignored by SQLite + reflection. Pull request courtesy Michael Gorven. + + .. change:: + :tags: bug, asyncio + :tickets: 8952 + :versions: 2.0.0b5 + + Removed non-functional ``merge()`` method from + :class:`_asyncio.AsyncResult`. This method has never worked and was + included with :class:`_asyncio.AsyncResult` in error. + + .. change:: + :tags: bug, oracle + :tickets: 8708 + :versions: 2.0.0b4 + + Continued fixes for Oracle fix :ticket:`8708` released in 1.4.43 where + bound parameter names that start with underscores, which are disallowed by + Oracle, were still not being properly escaped in all circumstances. + + + .. change:: + :tags: bug, postgresql + :tickets: 8748 + + Made an adjustment to how the PostgreSQL dialect considers column types + when it reflects columns from a table, to accommodate for alternative + backends which may return NULL from the PG ``format_type()`` function. + + .. change:: + :tags: usecase, sqlite + :tickets: 8903 + + Added support for the SQLite backend to reflect the "DEFERRABLE" and + "INITIALLY" keywords which may be present on a foreign key construct. Pull + request courtesy Michael Gorven. + + .. change:: + :tags: usecase, sql + :tickets: 8800 + + An informative re-raise is now thrown in the case where any "literal + bindparam" render operation fails, indicating the value itself and + the datatype in use, to assist in debugging when literal params + are being rendered in a statement. + + .. 
change:: + :tags: usecase, sqlite + :tickets: 8804 + + Added support for reflection of expression-oriented WHERE criteria included + in indexes on the SQLite dialect, in a manner similar to that of the + PostgreSQL dialect. Pull request courtesy Tobias Pfeiffer. + + .. change:: + :tags: bug, sql + :tickets: 8827 + + Fixed a series of issues regarding the position and sometimes the identity + of rendered bound parameters, such as those used for SQLite, asyncpg, + MySQL, Oracle and others. Some compiled forms would not maintain the order + of parameters correctly, such as the PostgreSQL ``regexp_replace()`` + function, the "nesting" feature of the :class:`.CTE` construct first + introduced in :ticket:`4123`, and selectable tables formed by using the + :meth:`.FunctionElement.column_valued` method with Oracle. + + + .. change:: + :tags: bug, oracle + :tickets: 8945 + :versions: 2.0.0b5 + + Fixed issue in Oracle compiler where the syntax for + :meth:`.FunctionElement.column_valued` was incorrect, rendering the name + ``COLUMN_VALUE`` without qualifying the source table correctly. + + .. change:: + :tags: bug, engine + :tickets: 8963 + + Fixed issue where :meth:`_engine.Result.freeze` method would not work for + textual SQL using either :func:`_sql.text` or + :meth:`_engine.Connection.exec_driver_sql`. + .. changelog:: :version: 1.4.44 diff --git a/doc/build/changelog/unreleased_14/8708.rst b/doc/build/changelog/unreleased_14/8708.rst deleted file mode 100644 index 61dcbf658ec..00000000000 --- a/doc/build/changelog/unreleased_14/8708.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, oracle - :tickets: 8708 - :versions: 2.0.0b4 - - Continued fixes for Oracle fix :ticket:`8708` released in 1.4.43 where - bound parameter names that start with underscores, which are disallowed by - Oracle, were still not being properly escaped in all circumstances. 
- diff --git a/doc/build/changelog/unreleased_14/8748.rst b/doc/build/changelog/unreleased_14/8748.rst deleted file mode 100644 index 5d6704fd81b..00000000000 --- a/doc/build/changelog/unreleased_14/8748.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: bug, postgresql - :tickets: 8748 - - Made an adjustment to how the PostgreSQL dialect considers column types - when it reflects columns from a table, to accommodate for alternative - backends which may return NULL from the PG ``format_type()`` function. diff --git a/doc/build/changelog/unreleased_14/8800.rst b/doc/build/changelog/unreleased_14/8800.rst deleted file mode 100644 index 8a42975df74..00000000000 --- a/doc/build/changelog/unreleased_14/8800.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: usecase, sql - :tickets: 8800 - - An informative re-raise is now thrown in the case where any "literal - bindparam" render operation fails, indicating the value itself and - the datatype in use, to assist in debugging when literal params - are being rendered in a statement. diff --git a/doc/build/changelog/unreleased_14/8804.rst b/doc/build/changelog/unreleased_14/8804.rst deleted file mode 100644 index c3f91a16d2c..00000000000 --- a/doc/build/changelog/unreleased_14/8804.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: usecase, sqlite - :tickets: 8804 - - Added support for reflection of expression-oriented WHERE criteria included - in indexes on the SQLite dialect, in a manner similar to that of the - PostgreSQL dialect. Pull request courtesy Tobias Pfeiffer. diff --git a/doc/build/changelog/unreleased_14/8827.rst b/doc/build/changelog/unreleased_14/8827.rst deleted file mode 100644 index 2201cefb5b3..00000000000 --- a/doc/build/changelog/unreleased_14/8827.rst +++ /dev/null @@ -1,12 +0,0 @@ -.. 
change:: - :tags: bug, sql - :tickets: 8827 - - Fixed a series of issues regarding the position and sometimes the identity - of rendered bound parameters, such as those used for SQLite, asyncpg, - MySQL, Oracle and others. Some compiled forms would not maintain the order - of parameters correctly, such as the PostgreSQL ``regexp_replace()`` - function, the "nesting" feature of the :class:`.CTE` construct first - introduced in :ticket:`4123`, and selectable tables formed by using the - :meth:`.FunctionElement.column_valued` method with Oracle. - diff --git a/doc/build/changelog/unreleased_14/8862.rst b/doc/build/changelog/unreleased_14/8862.rst deleted file mode 100644 index 3be00789096..00000000000 --- a/doc/build/changelog/unreleased_14/8862.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 8862 - - Fixed bug where :meth:`_orm.Session.merge` would fail to preserve the - current loaded contents of relationship attributes that were indicated with - the :paramref:`_orm.relationship.viewonly` parameter, thus defeating - strategies that use :meth:`_orm.Session.merge` to pull fully loaded objects - from caches and other similar techniques. In a related change, fixed issue - where an object that contains a loaded relationship that was nonetheless - configured as ``lazy='raise'`` on the mapping would fail when passed to - :meth:`_orm.Session.merge`; checks for "raise" are now suspended within - the merge process assuming the :paramref:`_orm.Session.merge.load` - parameter remains at its default of ``True``. - - Overall, this is a behavioral adjustment to a change introduced in the 1.4 - series as of :ticket:`4994`, which took "merge" out of the set of cascades - applied by default to "viewonly" relationships. As "viewonly" relationships - aren't persisted under any circumstances, allowing their contents to - transfer during "merge" does not impact the persistence behavior of the - target object. 
This allows :meth:`_orm.Session.merge` to correctly suit one - of its use cases, that of adding objects to a :class:`.Session` that were - loaded elsewhere, often for the purposes of restoring from a cache. - diff --git a/doc/build/changelog/unreleased_14/8866.rst b/doc/build/changelog/unreleased_14/8866.rst deleted file mode 100644 index 0b82e8d3038..00000000000 --- a/doc/build/changelog/unreleased_14/8866.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, sqlite - :tickets: 8866 - - Backported a fix for SQLite reflection of unique constraints in attached - schemas, released in 2.0 as a small part of :ticket:`4379`. Previously, - unique constraints in attached schemas would be ignored by SQLite - reflection. Pull request courtesy Michael Gorven. diff --git a/doc/build/changelog/unreleased_14/8881.rst b/doc/build/changelog/unreleased_14/8881.rst deleted file mode 100644 index 9cd62f491ab..00000000000 --- a/doc/build/changelog/unreleased_14/8881.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 8881 - - Fixed issues in :func:`_orm.with_expression` where expressions that were - composed of columns that were referenced from the enclosing SELECT would - not render correct SQL in some contexts, in the case where the expression - had a label name that matched the attribute which used - :func:`_orm.query_expression`, even when :func:`_orm.query_expression` had - no default expression. For the moment, if the :func:`_orm.query_expression` - does have a default expression, that label name is still used for that - default, and an additional label with the same name will continue to be - ignored. Overall, this case is pretty thorny so further adjustments might - be warranted. diff --git a/doc/build/changelog/unreleased_14/8903.rst b/doc/build/changelog/unreleased_14/8903.rst deleted file mode 100644 index fe1590c043f..00000000000 --- a/doc/build/changelog/unreleased_14/8903.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. 
change:: - :tags: usecase, sqlite - :tickets: 8903 - - Added support for the SQLite backend to reflect the "DEFERRABLE" and - "INITIALLY" keywords which may be present on a foreign key construct. Pull - request courtesy Michael Gorven. diff --git a/doc/build/changelog/unreleased_14/8945.rst b/doc/build/changelog/unreleased_14/8945.rst deleted file mode 100644 index e1b4bd6935f..00000000000 --- a/doc/build/changelog/unreleased_14/8945.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, oracle - :tickets: 8945 - :versions: 2.0.0b5 - - Fixed issue in Oracle compiler where the syntax for - :meth:`.FunctionElement.column_valued` was incorrect, rendering the name - ``COLUMN_VALUE`` without qualifying the source table correctly. diff --git a/doc/build/changelog/unreleased_14/8952.rst b/doc/build/changelog/unreleased_14/8952.rst deleted file mode 100644 index 8be984be0f3..00000000000 --- a/doc/build/changelog/unreleased_14/8952.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, asyncio - :tickets: 8952 - :versions: 2.0.0b5 - - Removed non-functional ``merge()`` method from - :class:`_asyncio.AsyncResult`. This method has never worked and was - included with :class:`_asyncio.AsyncResult` in error. diff --git a/doc/build/changelog/unreleased_14/8963.rst b/doc/build/changelog/unreleased_14/8963.rst deleted file mode 100644 index 54711af5960..00000000000 --- a/doc/build/changelog/unreleased_14/8963.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, engine - :tickets: 8963 - - Fixed issue where :meth:`_engine.Result.freeze` method would not work for - textual SQL using either :func:`_sql.text` or - :meth:`_engine.Connection.exec_driver_sql`. - diff --git a/doc/build/conf.py b/doc/build/conf.py index baa10ab30eb..97346f6ff2d 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -213,9 +213,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. 
-release = "1.4.44" +release = "1.4.45" -release_date = "November 12, 2022" +release_date = "December 10, 2022" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From 9920a0dd16be1325026a18dd02d5d4b89d712439 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 10 Dec 2022 13:51:43 -0500 Subject: [PATCH 472/632] Version 1.4.46 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 8fa037a695f..6ca71530aa0 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.46 + :include_notes_from: unreleased_14 + .. changelog:: :version: 1.4.45 :released: December 10, 2022 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index be8a8ebc077..d545406a156 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.45" +__version__ = "1.4.46" def __go(lcls): From 39299672ff7bc9f759714e2546b2fea0ab2e9990 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 11 Dec 2022 11:27:52 -0500 Subject: [PATCH 473/632] adjust for tox changes to passenv Fixed issue in tox.ini file where changes in the tox 4.0 series to the format of "passenv" caused tox to not function correctly, in particular raising an error as of tox 4.0.6. 
Change-Id: I659c8fc523a71deaa02a89103c9e7241cf81d831 References: https://github.com/tox-dev/tox/issues/2676 --- doc/build/changelog/unreleased_14/tox_fix.rst | 6 ++++++ tox.ini | 18 ++++++++++++++++-- 2 files changed, 22 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/tox_fix.rst diff --git a/doc/build/changelog/unreleased_14/tox_fix.rst b/doc/build/changelog/unreleased_14/tox_fix.rst new file mode 100644 index 00000000000..f37829fc370 --- /dev/null +++ b/doc/build/changelog/unreleased_14/tox_fix.rst @@ -0,0 +1,6 @@ +.. change:: + :tags: bug, tests + + Fixed issue in tox.ini file where changes in the tox 4.0 series to the + format of "passenv" caused tox to not function correctly, in particular + raising an error as of tox 4.0.6. diff --git a/tox.ini b/tox.ini index 923c2cf3727..f9727286590 100644 --- a/tox.ini +++ b/tox.ini @@ -54,7 +54,7 @@ deps= cov: pytest-cov -allowlist_externals=sh +allowlist_externals=sh, /bin/true # PYTHONPATH - erased so that we use the build that's present # in .tox as the SQLAlchemy library to be imported @@ -121,7 +121,21 @@ setenv= # tox as of 2.0 blocks all environment variables from the # outside, unless they are here (or in TOX_TESTENV_PASSENV, # wildcards OK). 
Need at least these -passenv=ORACLE_HOME NLS_LANG TOX_POSTGRESQL TOX_POSTGRESQL_PY2K TOX_MYSQL TOX_MYSQL_PY2K TOX_ORACLE TOX_MSSQL TOX_SQLITE TOX_SQLITE_FILE TOX_WORKERS EXTRA_SQLITE_DRIVERS EXTRA_PG_DRIVERS EXTRA_MYSQL_DRIVERS +passenv= + ORACLE_HOME + NLS_LANG + TOX_POSTGRESQL + TOX_POSTGRESQL_PY2K + TOX_MYSQL + TOX_MYSQL_PY2K + TOX_ORACLE + TOX_MSSQL + TOX_SQLITE + TOX_SQLITE_FILE + TOX_WORKERS + EXTRA_SQLITE_DRIVERS + EXTRA_PG_DRIVERS + EXTRA_MYSQL_DRIVERS # for nocext, we rm *.so in lib in case we are doing usedevelop=True commands= From 2f279ce32235a69b9db6a0048230db9c34ff70db Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 11 Dec 2022 15:47:25 -0500 Subject: [PATCH 474/632] add color directives See https://tox.wiki/en/latest/faq.html#tox-4-known-regressions Change-Id: I3c7291a660dc167bce3151e02cd123edc4707ca1 (cherry picked from commit 184508afbb7656c4f51d98695bbeeeec9aae9b87) --- tox.ini | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tox.ini b/tox.ini index f9727286590..f007614b3b7 100644 --- a/tox.ini +++ b/tox.ini @@ -73,8 +73,11 @@ allowlist_externals=sh, /bin/true setenv= PYTHONPATH= PYTHONNOUSERSITE=1 + + PYTEST_COLOR={tty:--color=yes} + MEMUSAGE=--nomemory - BASECOMMAND=python -m pytest --rootdir {toxinidir} --log-info=sqlalchemy.testing + BASECOMMAND=python -m pytest {env:PYTEST_COLOR} --rootdir {toxinidir} --log-info=sqlalchemy.testing WORKERS={env:TOX_WORKERS:-n4 --max-worker-restart=5} @@ -156,7 +159,7 @@ deps= patch==1.* git+https://github.com/sqlalchemy/sqlalchemy2-stubs commands = - pytest test/ext/mypy/test_mypy_plugin_py3k.py {posargs} + pytest {env:PYTEST_COLOR} test/ext/mypy/test_mypy_plugin_py3k.py {posargs} # thanks to https://julien.danjou.info/the-best-flake8-extensions/ [testenv:pep8] @@ -183,7 +186,7 @@ commands = deps = {[testenv]deps} .[aiosqlite] commands= - python -m pytest {env:PY_SPECIFIC} {env:WORKERS} {env:SQLITE:} {env:POSTGRESQL:} {env:MYSQL:} {env:ORACLE:} {env:MSSQL:} {env:BACKENDONLY:} 
{env:IDENTS:} {env:MEMUSAGE:} {env:COVERAGE:} {posargs} + python -m pytest {env:PYTEST_COLOR} {env:PY_SPECIFIC} {env:WORKERS} {env:SQLITE:} {env:POSTGRESQL:} {env:MYSQL:} {env:ORACLE:} {env:MSSQL:} {env:BACKENDONLY:} {env:IDENTS:} {env:MEMUSAGE:} {env:COVERAGE:} {posargs} oracle,mssql,sqlite_file: python reap_dbs.py db_idents.txt # command run in the github action when cext are not active. @@ -191,5 +194,5 @@ commands= deps = {[testenv]deps} .[aiosqlite] commands= - python -m pytest {env:PY_SPECIFIC} {env:WORKERS} {env:SQLITE:} {env:POSTGRESQL:} {env:MYSQL:} {env:ORACLE:} {env:MSSQL:} {env:BACKENDONLY:} {env:IDENTS:} {env:MEMUSAGE:} {env:COVERAGE:} {posargs} + python -m pytest {env:PYTEST_COLOR} {env:PY_SPECIFIC} {env:WORKERS} {env:SQLITE:} {env:POSTGRESQL:} {env:MYSQL:} {env:ORACLE:} {env:MSSQL:} {env:BACKENDONLY:} {env:IDENTS:} {env:MEMUSAGE:} {env:COVERAGE:} {posargs} oracle,mssql,sqlite_file: python reap_dbs.py db_idents.txt From 225eff40f28e943c80dbe8cd2fcd7322e1bdb816 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 12 Dec 2022 13:47:27 -0500 Subject: [PATCH 475/632] catch all BaseException in pool and revert failed checkouts Fixed a long-standing race condition in the connection pool which could occur under eventlet/gevent monkeypatching schemes in conjunction with the use of eventlet/gevent ``Timeout`` conditions, where a connection pool checkout that's interrupted due to the timeout would fail to clean up the failed state, causing the underlying connection record and sometimes the database connection itself to "leak", leaving the pool in an invalid state with unreachable entries. This issue was first identified and fixed in SQLAlchemy 1.2 for :ticket:`4225`, however the failure modes detected in that fix failed to accommodate for ``BaseException``, rather than ``Exception``, which prevented eventlet/gevent ``Timeout`` from being caught. 
In addition, a block within initial pool connect has also been identified and hardened with a ``BaseException`` -> "clean failed connect" block to accommodate for the same condition in this location. Big thanks to Github user @niklaus for their tenacious efforts in identifying and describing this intricate issue. Fixes: #8974 Change-Id: I95a0e1f080d0cee6f1a66977432a586fdf87f686 (cherry picked from commit a71917204dcf12a93d957a0fa29c9df97d0411ee) --- doc/build/changelog/unreleased_14/8974.rst | 19 +++++++ lib/sqlalchemy/pool/base.py | 30 +++++++++-- test/engine/test_pool.py | 62 +++++++++++++++++----- 3 files changed, 95 insertions(+), 16 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8974.rst diff --git a/doc/build/changelog/unreleased_14/8974.rst b/doc/build/changelog/unreleased_14/8974.rst new file mode 100644 index 00000000000..6400c95b452 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8974.rst @@ -0,0 +1,19 @@ +.. change:: + :tags: bug, pool + :tickets: 8974 + + Fixed a long-standing race condition in the connection pool which could + occur under eventlet/gevent monkeypatching schemes in conjunction with the + use of eventlet/gevent ``Timeout`` conditions, where a connection pool + checkout that's interrupted due to the timeout would fail to clean up the + failed state, causing the underlying connection record and sometimes the + database connection itself to "leak", leaving the pool in an invalid state + with unreachable entries. This issue was first identified and fixed in + SQLAlchemy 1.2 for :ticket:`4225`, however the failure modes detected in + that fix failed to accommodate for ``BaseException``, rather than + ``Exception``, which prevented eventlet/gevent ``Timeout`` from being + caught. In addition, a block within initial pool connect has also been + identified and hardened with a ``BaseException`` -> "clean failed connect" + block to accommodate for the same condition in this location. 
+ Big thanks to Github user @niklaus for their tenacious efforts in + identifying and describing this intricate issue. diff --git a/lib/sqlalchemy/pool/base.py b/lib/sqlalchemy/pool/base.py index a8234c53093..dbffd54b857 100644 --- a/lib/sqlalchemy/pool/base.py +++ b/lib/sqlalchemy/pool/base.py @@ -260,10 +260,12 @@ def _close_connection(self, connection, terminate=False): self._dialect.do_terminate(connection) else: self._dialect.do_close(connection) - except Exception: + except BaseException as e: self.logger.error( "Exception closing connection %r", connection, exc_info=True ) + if not isinstance(e, Exception): + raise def _create_connection(self): """Called by subclasses to create a new ConnectionRecord.""" @@ -491,9 +493,13 @@ def checkout(cls, pool): rec = pool._do_get() try: dbapi_connection = rec.get_connection() - except Exception as err: + except BaseException as err: with util.safe_reraise(): rec._checkin_failed(err, _fairy_was_created=False) + + # never called, this is for code linters + raise + echo = pool._should_log_debug() fairy = _ConnectionFairy(dbapi_connection, rec, echo) @@ -680,7 +686,7 @@ def __connect(self): self.dbapi_connection = connection = pool._invoke_creator(self) pool.logger.debug("Created new connection %r", connection) self.fresh = True - except Exception as e: + except BaseException as e: with util.safe_reraise(): pool.logger.debug("Error on connect(): %s", e) else: @@ -907,6 +913,7 @@ def _checkout(cls, pool, threadconns=None, fairy=None): # is not accessible from a connection standpoint, those won't proceed # here. 
attempts = 2 + while attempts > 0: connection_is_fresh = fairy._connection_record.fresh fairy._connection_record.fresh = False @@ -959,7 +966,7 @@ def _checkout(cls, pool, threadconns=None, fairy=None): fairy.dbapi_connection = ( fairy._connection_record.get_connection() ) - except Exception as err: + except BaseException as err: with util.safe_reraise(): fairy._connection_record._checkin_failed( err, @@ -974,6 +981,21 @@ def _checkout(cls, pool, threadconns=None, fairy=None): del fairy attempts -= 1 + except BaseException as be_outer: + with util.safe_reraise(): + rec = fairy._connection_record + if rec is not None: + rec._checkin_failed( + be_outer, + _fairy_was_created=True, + ) + + # prevent _ConnectionFairy from being carried + # in the stack trace, see above + del fairy + + # never called, this is for code linters + raise pool.logger.info("Reconnection attempts exhausted on checkout") fairy.invalidate() diff --git a/test/engine/test_pool.py b/test/engine/test_pool.py index 7a3b8ed58dc..2e11002efcc 100644 --- a/test/engine/test_pool.py +++ b/test/engine/test_pool.py @@ -858,18 +858,34 @@ def listen_three(*args): p2.connect() eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"]) - def test_connect_event_fails_invalidates(self): + @testing.variation("exc_type", ["plain", "base_exception"]) + def test_connect_event_fails_invalidates(self, exc_type): fail = False + if exc_type.plain: + + class RegularThing(Exception): + pass + + exc_cls = RegularThing + elif exc_type.base_exception: + + class TimeoutThing(BaseException): + pass + + exc_cls = TimeoutThing + else: + exc_type.fail() + def listen_one(conn, rec): if fail: - raise Exception("it failed") + raise exc_cls("it failed") def listen_two(conn, rec): rec.info["important_flag"] = True p1 = pool.QueuePool( - creator=MockDBAPI().connect, pool_size=1, max_overflow=0 + creator=MockDBAPI().connect, pool_size=1, max_overflow=0, timeout=5 ) event.listen(p1, "connect", listen_one) event.listen(p1, 
"connect", listen_two) @@ -880,7 +896,9 @@ def listen_two(conn, rec): conn.close() fail = True - assert_raises(Exception, p1.connect) + + # if the failed checkin is not reverted, the pool is blocked + assert_raises(exc_cls, p1.connect) fail = False @@ -1506,7 +1524,7 @@ def assert_no_wr_callback( return patch.object(pool, "_finalize_fairy", assert_no_wr_callback) - def _assert_cleanup_on_pooled_reconnect(self, dbapi, p): + def _assert_cleanup_on_pooled_reconnect(self, dbapi, p, exc_cls=Exception): # p is QueuePool with size=1, max_overflow=2, # and one connection in the pool that will need to # reconnect when next used (either due to recycle or invalidate) @@ -1515,7 +1533,7 @@ def _assert_cleanup_on_pooled_reconnect(self, dbapi, p): eq_(p.checkedout(), 0) eq_(p._overflow, 0) dbapi.shutdown(True) - assert_raises_context_ok(Exception, p.connect) + assert_raises_context_ok(exc_cls, p.connect) eq_(p._overflow, 0) eq_(p.checkedout(), 0) # and not 1 @@ -1633,18 +1651,38 @@ def checkout(conn, conn_rec, conn_f): c = p.connect() c.close() - def test_error_on_pooled_reconnect_cleanup_wcheckout_event(self): + @testing.variation("exc_type", ["plain", "base_exception"]) + def test_error_on_pooled_reconnect_cleanup_wcheckout_event(self, exc_type): dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2) c1 = p.connect() c1.close() - @event.listens_for(p, "checkout") - def handle_checkout_event(dbapi_con, con_record, con_proxy): - if dbapi.is_shutdown: - raise tsa.exc.DisconnectionError() + if exc_type.plain: - self._assert_cleanup_on_pooled_reconnect(dbapi, p) + @event.listens_for(p, "checkout") + def handle_checkout_event(dbapi_con, con_record, con_proxy): + if dbapi.is_shutdown: + raise tsa.exc.DisconnectionError() + + elif exc_type.base_exception: + + class TimeoutThing(BaseException): + pass + + @event.listens_for(p, "checkout") + def handle_checkout_event(dbapi_con, con_record, con_proxy): + if dbapi.is_shutdown: + raise TimeoutThing() + + else: + 
exc_type.fail() + + self._assert_cleanup_on_pooled_reconnect( + dbapi, + p, + exc_cls=TimeoutThing if exc_type.base_exception else Exception, + ) @testing.combinations((True, testing.requires.python3), (False,)) def test_userspace_disconnectionerror_weakref_finalizer(self, detach_gced): From 7b150d43ce04a62ee83159855bdc4452ea02617a Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 12 Dec 2022 18:05:07 -0500 Subject: [PATCH 476/632] check index_list pragma for number of columns returned Fixed regression caused by new support for reflection of partial indexes on SQLite added in 1.4.45 for :ticket:`8804`, where the ``index_list`` pragma command in very old versions of SQLite (possibly prior to 3.8.9) does not return the current expected number of columns, leading to exceptions raised when reflecting tables and indexes. Fixes: #8969 Change-Id: If317cdcfc6782f7e180df329b6ea0ddb48ce2269 (cherry picked from commit e026a0f3562bec5fbc18e223176be8121c147193) --- doc/build/changelog/unreleased_14/8969.rst | 10 +++++++++ lib/sqlalchemy/dialects/sqlite/base.py | 2 +- test/dialect/test_sqlite.py | 1 + test/requirements.py | 24 ++++++++++++++++++++++ 4 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/8969.rst diff --git a/doc/build/changelog/unreleased_14/8969.rst b/doc/build/changelog/unreleased_14/8969.rst new file mode 100644 index 00000000000..8458706c809 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8969.rst @@ -0,0 +1,10 @@ +.. change:: + :tags: bug, sqlite + :tickets: 8969 + :versions: 2.0.0b5 + + Fixed regression caused by new support for reflection of partial indexes on + SQLite added in 1.4.45 for :ticket:`8804`, where the ``index_list`` pragma + command in very old versions of SQLite (possibly prior to 3.8.9) does not + return the current expected number of columns, leading to exceptions raised + when reflecting tables and indexes. 
diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index f75610553cb..56294f4bd54 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -2518,7 +2518,7 @@ def get_indexes(self, connection, table_name, schema=None, **kw): ) # check partial indexes - if row[4]: + if len(row) >= 5 and row[4]: s = ( "SELECT sql FROM %(schema)ssqlite_master " "WHERE name = ? " diff --git a/test/dialect/test_sqlite.py b/test/dialect/test_sqlite.py index 01ba4164803..418bf9c6575 100644 --- a/test/dialect/test_sqlite.py +++ b/test/dialect/test_sqlite.py @@ -2392,6 +2392,7 @@ def test_create_index_with_schema(self): ], ) + @testing.requires.sqlite_partial_indexes def test_reflect_partial_indexes(self, connection): connection.exec_driver_sql( "create table foo_with_partial_index (x integer, y integer)" diff --git a/test/requirements.py b/test/requirements.py index 55c3383a42f..fa9ba88f58f 100644 --- a/test/requirements.py +++ b/test/requirements.py @@ -1128,6 +1128,30 @@ def _sqlite_json(self, config): def sqlite_memory(self): return only_on(self._sqlite_memory_db) + def _sqlite_partial_idx(self, config): + if not against(config, "sqlite"): + return False + else: + with config.db.connect() as conn: + connection = conn.connection + cursor = connection.cursor() + try: + cursor.execute("SELECT * FROM pragma_index_info('idx52')") + except: + return False + else: + return ( + cursor.description is not None + and len(cursor.description) >= 3 + ) + finally: + cursor.close() + + @property + def sqlite_partial_indexes(self): + + return only_on(self._sqlite_partial_idx) + @property def reflects_json_type(self): return only_on( From 84ba8874e146bcdbf46ce70ece32c4c224c3fd44 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 15 Dec 2022 10:22:36 -0500 Subject: [PATCH 477/632] implement literal_binds with expanding + bind_expression Fixed bug where SQL compilation would fail (assertion fail in 2.0, NoneType error in 
1.4) when using an expression whose type included :meth:`_types.TypeEngine.bind_expression`, in the context of an "expanding" (i.e. "IN") parameter in conjunction with the ``literal_binds`` compiler parameter. Fixes: #8989 Change-Id: Ic9fd27b46381b488117295ea5a492d8fc158e39f (cherry picked from commit 8c6de3c2c43ab372cbbe76464b4c5be3b6457252) --- doc/build/changelog/unreleased_14/8989.rst | 10 ++++ lib/sqlalchemy/sql/compiler.py | 69 +++++++++++++++++++--- test/sql/test_type_expressions.py | 48 +++++++++------ 3 files changed, 100 insertions(+), 27 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8989.rst diff --git a/doc/build/changelog/unreleased_14/8989.rst b/doc/build/changelog/unreleased_14/8989.rst new file mode 100644 index 00000000000..4c38fdf0190 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8989.rst @@ -0,0 +1,10 @@ +.. change:: + :tags: bug, types + :tickets: 8989 + :versions: 2.0.0b5 + + Fixed bug where SQL compilation would fail (assertion fail in 2.0, NoneType + error in 1.4) when using an expression whose type included + :meth:`_types.TypeEngine.bind_expression`, in the context of an "expanding" + (i.e. "IN") parameter in conjunction with the ``literal_binds`` compiler + parameter. diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 8fbf3092aaf..cb30c777389 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -699,6 +699,8 @@ class SQLCompiler(Compiled): """ + _post_compile_pattern = re.compile(r"__\[POSTCOMPILE_(\S+?)(~~.+?~~)?\]") + positiontup = None """for a compiled construct that uses a positional paramstyle, will be a sequence of strings, indicating the names of bound parameters in order. 
@@ -1294,7 +1296,7 @@ def process_expanding(m): return expr statement = re.sub( - r"__\[POSTCOMPILE_(\S+?)(~~.+?~~)?\]", + self._post_compile_pattern, process_expanding, self.string, ) @@ -2094,12 +2096,16 @@ def visit_empty_set_expr(self, element_types): ) def _literal_execute_expanding_parameter_literal_binds( - self, parameter, values + self, parameter, values, bind_expression_template=None ): typ_dialect_impl = parameter.type._unwrapped_dialect_impl(self.dialect) if not values: + # empty IN expression. note we don't need to use + # bind_expression_template here because there are no + # expressions to render. + if typ_dialect_impl._is_tuple_type: replacement_expression = ( "VALUES " if self.dialect.tuple_in_values else "" @@ -2120,6 +2126,12 @@ def _literal_execute_expanding_parameter_literal_binds( ) ): + if typ_dialect_impl._has_bind_expression: + raise NotImplementedError( + "bind_expression() on TupleType not supported with " + "literal_binds" + ) + replacement_expression = ( "VALUES " if self.dialect.tuple_in_values else "" ) + ", ".join( @@ -2135,10 +2147,29 @@ def _literal_execute_expanding_parameter_literal_binds( for i, tuple_element in enumerate(values) ) else: - replacement_expression = ", ".join( - self.render_literal_value(value, parameter.type) - for value in values - ) + if bind_expression_template: + post_compile_pattern = self._post_compile_pattern + m = post_compile_pattern.search(bind_expression_template) + assert m and m.group( + 2 + ), "unexpected format for expanding parameter" + + tok = m.group(2).split("~~") + be_left, be_right = tok[1], tok[3] + replacement_expression = ", ".join( + "%s%s%s" + % ( + be_left, + self.render_literal_value(value, parameter.type), + be_right, + ) + for value in values + ) + else: + replacement_expression = ", ".join( + self.render_literal_value(value, parameter.type) + for value in values + ) return (), replacement_expression @@ -2453,7 +2484,7 @@ def visit_bindparam( bind_expression, 
skip_bind_expression=True, within_columns_clause=within_columns_clause, - literal_binds=literal_binds, + literal_binds=literal_binds and not bindparam.expanding, literal_execute=literal_execute, render_postcompile=render_postcompile, **kwargs @@ -2461,14 +2492,26 @@ def visit_bindparam( if bindparam.expanding: # for postcompile w/ expanding, move the "wrapped" part # of this into the inside + m = re.match( r"^(.*)\(__\[POSTCOMPILE_(\S+?)\]\)(.*)$", wrapped ) + assert m, "unexpected format for expanding parameter" wrapped = "(__[POSTCOMPILE_%s~~%s~~REPL~~%s~~])" % ( m.group(2), m.group(1), m.group(3), ) + + if literal_binds: + ret = self.render_literal_bindparam( + bindparam, + within_columns_clause=True, + bind_expression_template=wrapped, + **kwargs + ) + return "(%s)" % ret + return wrapped if not literal_binds: @@ -2568,7 +2611,11 @@ def visit_bindparam( return ret def render_literal_bindparam( - self, bindparam, render_literal_value=NO_ARG, **kw + self, + bindparam, + render_literal_value=NO_ARG, + bind_expression_template=None, + **kw ): if render_literal_value is not NO_ARG: value = render_literal_value @@ -2587,7 +2634,11 @@ def render_literal_bindparam( if bindparam.expanding: leep = self._literal_execute_expanding_parameter_literal_binds - to_update, replacement_expr = leep(bindparam, value) + to_update, replacement_expr = leep( + bindparam, + value, + bind_expression_template=bind_expression_template, + ) return replacement_expr else: return self.render_literal_value(value, bindparam.type) diff --git a/test/sql/test_type_expressions.py b/test/sql/test_type_expressions.py index e0e0858a450..7c219262079 100644 --- a/test/sql/test_type_expressions.py +++ b/test/sql/test_type_expressions.py @@ -182,28 +182,40 @@ def test_select_binds(self): "test_table WHERE test_table.y = lower(:y_1)", ) - def test_in_binds(self): + @testing.variation( + "compile_opt", ["plain", "postcompile", "literal_binds"] + ) + def test_in_binds(self, compile_opt): table = 
self._fixture() - self.assert_compile( - select(table).where( - table.c.y.in_(["hi", "there", "some", "expr"]) - ), - "SELECT test_table.x, lower(test_table.y) AS y FROM " - "test_table WHERE test_table.y IN " - "(__[POSTCOMPILE_y_1~~lower(~~REPL~~)~~])", - render_postcompile=False, + stmt = select(table).where( + table.c.y.in_(["hi", "there", "some", "expr"]) ) - self.assert_compile( - select(table).where( - table.c.y.in_(["hi", "there", "some", "expr"]) - ), - "SELECT test_table.x, lower(test_table.y) AS y FROM " - "test_table WHERE test_table.y IN " - "(lower(:y_1_1), lower(:y_1_2), lower(:y_1_3), lower(:y_1_4))", - render_postcompile=True, - ) + if compile_opt.plain: + self.assert_compile( + stmt, + "SELECT test_table.x, lower(test_table.y) AS y FROM " + "test_table WHERE test_table.y IN " + "(__[POSTCOMPILE_y_1~~lower(~~REPL~~)~~])", + render_postcompile=False, + ) + elif compile_opt.postcompile: + self.assert_compile( + stmt, + "SELECT test_table.x, lower(test_table.y) AS y FROM " + "test_table WHERE test_table.y IN " + "(lower(:y_1_1), lower(:y_1_2), lower(:y_1_3), lower(:y_1_4))", + render_postcompile=True, + ) + elif compile_opt.literal_binds: + self.assert_compile( + stmt, + "SELECT test_table.x, lower(test_table.y) AS y FROM " + "test_table WHERE test_table.y IN " + "(lower('hi'), lower('there'), lower('some'), lower('expr'))", + literal_binds=True, + ) def test_dialect(self): table = self._fixture() From f5905a012ed539286f33b10559b832da7babe1c5 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 16 Dec 2022 14:05:48 -0500 Subject: [PATCH 478/632] dont call platform.architecture() Fixed regression where the base compat module was calling upon ``platform.architecture()`` in order to detect some system properties, which results in an over-broad system call against the system-level ``file`` call that is unavailable under some circumstances, including within some secure environment configurations. 
Fixes: #8995 Change-Id: Ib6171e75aff5a60a79dab81a0be21bee2456318b (cherry picked from commit e852362bfdf9a18dfd91137f4a2d7c2dfee30082) --- doc/build/changelog/unreleased_14/8995.rst | 9 +++++++++ lib/sqlalchemy/util/compat.py | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/8995.rst diff --git a/doc/build/changelog/unreleased_14/8995.rst b/doc/build/changelog/unreleased_14/8995.rst new file mode 100644 index 00000000000..5191b58de9a --- /dev/null +++ b/doc/build/changelog/unreleased_14/8995.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, installation + :tickets: 8995 + + Fixed regression where the base compat module was calling upon + ``platform.architecture()`` in order to detect some system properties, + which results in an over-broad system call against the system-level + ``file`` call that is unavailable under some circumstances, including + within some secure environment configurations. diff --git a/lib/sqlalchemy/util/compat.py b/lib/sqlalchemy/util/compat.py index 2b5a2c0ef42..460d7161c5a 100644 --- a/lib/sqlalchemy/util/compat.py +++ b/lib/sqlalchemy/util/compat.py @@ -29,7 +29,7 @@ win32 = sys.platform.startswith("win") osx = sys.platform.startswith("darwin") arm = "aarch" in platform.machine().lower() -is64bit = platform.architecture()[0] == "64bit" +is64bit = sys.maxsize > 2 ** 32 has_refcount_gc = bool(cpython) From deef63c190a423ed7a6d340df17a6318492c1fb2 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 19 Dec 2022 08:34:51 -0500 Subject: [PATCH 479/632] add exclusion for unusual chars in column names Added new exclusion rule for third party dialects called ``unusual_column_name_characters``, which can be "closed" for third party dialects that don't support column names with unusual characters such as dots, slashes, or percent signs in them, even if the name is properly quoted. 
Fixes: #9002 Change-Id: I44b765df4c73ce5ec1907d031fd9c89761fd99d1 References: #8993 (cherry picked from commit 946058ec6070ab4db9fdfab612ec4543fea9cd1c) --- doc/build/changelog/unreleased_14/9002.rst | 10 ++++++++++ lib/sqlalchemy/testing/requirements.py | 11 +++++++++++ lib/sqlalchemy/testing/suite/test_dialect.py | 1 + 3 files changed, 22 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/9002.rst diff --git a/doc/build/changelog/unreleased_14/9002.rst b/doc/build/changelog/unreleased_14/9002.rst new file mode 100644 index 00000000000..c2d3f01b9f8 --- /dev/null +++ b/doc/build/changelog/unreleased_14/9002.rst @@ -0,0 +1,10 @@ +.. change:: + :tags: bug, tests + :tickets: 9002 + + Added new exclusion rule for third party dialects called + ``unusual_column_name_characters``, which can be "closed" for third party + dialects that don't support column names with unusual characters such as + dots, slashes, or percent signs in them, even if the name is properly + quoted. + diff --git a/lib/sqlalchemy/testing/requirements.py b/lib/sqlalchemy/testing/requirements.py index 8cd4c64f27b..e225512889f 100644 --- a/lib/sqlalchemy/testing/requirements.py +++ b/lib/sqlalchemy/testing/requirements.py @@ -152,6 +152,17 @@ def implicitly_named_constraints(self): return exclusions.open() + @property + def unusual_column_name_characters(self): + """target database allows column names that have unusual characters + in them, such as dots, spaces, slashes, or percent signs. + + The column names are as always in such a case quoted, however the + DB still needs to support those characters in the name somehow. 
+ + """ + return exclusions.open() + @property def subqueries(self): """Target database must support subqueries.""" diff --git a/lib/sqlalchemy/testing/suite/test_dialect.py b/lib/sqlalchemy/testing/suite/test_dialect.py index 99947bbe4f5..c1c0856c325 100644 --- a/lib/sqlalchemy/testing/suite/test_dialect.py +++ b/lib/sqlalchemy/testing/suite/test_dialect.py @@ -343,6 +343,7 @@ class DifficultParametersTest(fixtures.TestBase): ) @tough_parameters + @config.requirements.unusual_column_name_characters def test_round_trip_same_named_column( self, paramname, connection, metadata ): From 074c1471958e155e79a3392ec65dc629012119c5 Mon Sep 17 00:00:00 2001 From: asimfarooq5 Date: Wed, 14 Dec 2022 15:32:47 -0500 Subject: [PATCH 480/632] Add MACCADDR8 for PGCompiler Add MACCADDR8 for PGCompiler Closes: #8393 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/8393 Pull-request-sha: 837a68eba3e31e0acbb7c47ee87bca4e9def7648 Change-Id: I87e4999eb8d82662ff8ab409c98dc57edd7fd271 (cherry picked from commit 33f15740a0b72bae64fc2c2f6d0f9724cfe9164a) --- doc/build/changelog/changelog_14.rst | 2 +- doc/build/changelog/unreleased_14/8393.rst | 7 +++++++ doc/build/dialects/postgresql.rst | 3 +++ lib/sqlalchemy/dialects/postgresql/__init__.py | 2 ++ lib/sqlalchemy/dialects/postgresql/base.py | 12 ++++++++++++ test/dialect/postgresql/test_types.py | 1 + 6 files changed, 26 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/8393.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 6ca71530aa0..ef40bcc3704 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -719,7 +719,7 @@ This document details individual issue-level changes made throughout :tickets: 8196 Fixed a crash of the mypy plugin when using a lambda as a Column - default. Pull request curtesy of tchapi. + default. Pull request courtesy of tchapi. .. 
change:: diff --git a/doc/build/changelog/unreleased_14/8393.rst b/doc/build/changelog/unreleased_14/8393.rst new file mode 100644 index 00000000000..fab9eb04c7a --- /dev/null +++ b/doc/build/changelog/unreleased_14/8393.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: usecase, postgresql + :tickets: 8393 + :versions: 2.0.0b5 + + Added the PostgreSQL type ``MACADDR8``. + Pull request courtesy of Asim Farooq. diff --git a/doc/build/dialects/postgresql.rst b/doc/build/dialects/postgresql.rst index 4e8fb98d95e..c591ab00066 100644 --- a/doc/build/dialects/postgresql.rst +++ b/doc/build/dialects/postgresql.rst @@ -31,6 +31,7 @@ they originate from :mod:`sqlalchemy.types` or from the local dialect:: JSON, JSONB, MACADDR, + MACADDR8, MONEY, NUMERIC, OID, @@ -110,6 +111,8 @@ construction arguments, are as follows: .. autoclass:: MACADDR +.. autoclass:: MACADDR8 + .. autoclass:: MONEY .. autoclass:: OID diff --git a/lib/sqlalchemy/dialects/postgresql/__init__.py b/lib/sqlalchemy/dialects/postgresql/__init__.py index 12d9e94443d..262e160d8d0 100644 --- a/lib/sqlalchemy/dialects/postgresql/__init__.py +++ b/lib/sqlalchemy/dialects/postgresql/__init__.py @@ -30,6 +30,7 @@ from .base import INTEGER from .base import INTERVAL from .base import MACADDR +from .base import MACADDR8 from .base import MONEY from .base import NUMERIC from .base import OID @@ -80,6 +81,7 @@ "UUID", "BIT", "MACADDR", + "MACADDR8", "MONEY", "OID", "REGCLASS", diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index 9ad8379e26b..aceec887e17 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -1679,6 +1679,13 @@ class MACADDR(sqltypes.TypeEngine): PGMacAddr = MACADDR +class MACADDR8(sqltypes.TypeEngine): + __visit_name__ = "MACADDR8" + + +PGMacAddr8 = MACADDR8 + + class MONEY(sqltypes.TypeEngine): r"""Provide the PostgreSQL MONEY type. 
@@ -2232,6 +2239,7 @@ def __init__(self, expression, type_): sqltypes.JSON: _json.JSON, } + ischema_names = { "_array": _array.ARRAY, "hstore": _hstore.HSTORE, @@ -2260,6 +2268,7 @@ def __init__(self, expression, type_): "bit": BIT, "bit varying": BIT, "macaddr": MACADDR, + "macaddr8": MACADDR8, "money": MONEY, "oid": OID, "regclass": REGCLASS, @@ -3007,6 +3016,9 @@ def visit_CIDR(self, type_, **kw): def visit_MACADDR(self, type_, **kw): return "MACADDR" + def visit_MACADDR8(self, type_, **kw): + return "MACADDR8" + def visit_MONEY(self, type_, **kw): return "MONEY" diff --git a/test/dialect/postgresql/test_types.py b/test/dialect/postgresql/test_types.py index 564554f668f..1a5cdb6474d 100644 --- a/test/dialect/postgresql/test_types.py +++ b/test/dialect/postgresql/test_types.py @@ -2719,6 +2719,7 @@ def get_col_spec(self): Column("bitstring", postgresql.BIT(4)), Column("addr", postgresql.INET), Column("addr2", postgresql.MACADDR), + Column("addr4", postgresql.MACADDR8), Column("price", postgresql.MONEY), Column("addr3", postgresql.CIDR), Column("doubleprec", postgresql.DOUBLE_PRECISION), From 5e56b180becf010cc1b912fe41997f36b5ddc361 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 19 Dec 2022 15:15:35 -0500 Subject: [PATCH 481/632] add joins_implicitly to column_valued() Added parameter :paramref:`.FunctionElement.column_valued.joins_implicitly`, which is useful in preventing the "cartesian product" warning when making use of table-valued or column-valued functions. This parameter was already introduced for :meth:`.FunctionElement.table_valued` in :ticket:`7845`, however it failed to be added for :meth:`.FunctionElement.column_valued` as well. 
Fixes: #9009 Change-Id: Ifb72fbcb4f4d2998e730d6f85ec7280df3bf3d47 (cherry picked from commit 567878e5c67d08c561dd064fe6dc25e4db7349e7) --- doc/build/changelog/unreleased_14/9009.rst | 12 +++++ lib/sqlalchemy/sql/functions.py | 14 +++++- test/sql/test_from_linter.py | 54 +++++++++++++++------- 3 files changed, 61 insertions(+), 19 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/9009.rst diff --git a/doc/build/changelog/unreleased_14/9009.rst b/doc/build/changelog/unreleased_14/9009.rst new file mode 100644 index 00000000000..9520b3e133c --- /dev/null +++ b/doc/build/changelog/unreleased_14/9009.rst @@ -0,0 +1,12 @@ +.. change:: + :tags: bug, sql + :tickets: 9009 + :versions: 2.0.0b5 + + Added parameter + :paramref:`.FunctionElement.column_valued.joins_implicitly`, which is + useful in preventing the "cartesian product" warning when making use of + table-valued or column-valued functions. This parameter was already + introduced for :meth:`.FunctionElement.table_valued` in :ticket:`7845`, + however it failed to be added for :meth:`.FunctionElement.column_valued` + as well. diff --git a/lib/sqlalchemy/sql/functions.py b/lib/sqlalchemy/sql/functions.py index 2b264e5bf96..96f2a3accfa 100644 --- a/lib/sqlalchemy/sql/functions.py +++ b/lib/sqlalchemy/sql/functions.py @@ -255,7 +255,7 @@ def table_valued(self, *expr, **kw): return new_func.alias(name=name, joins_implicitly=joins_implicitly) - def column_valued(self, name=None): + def column_valued(self, name=None, joins_implicitly=False): """Return this :class:`_functions.FunctionElement` as a column expression that selects from itself as a FROM clause. @@ -271,6 +271,16 @@ def column_valued(self, name=None): gs = func.generate_series(1, 5, -1).alias().column + :param name: optional name to assign to the alias name that's generated. + If omitted, a unique anonymizing name is used. 
+ + :param joins_implicitly: when True, the "table" portion of the column + valued function may be a member of the FROM clause without any + explicit JOIN to other tables in the SQL query, and no "cartesian + product" warning will be generated. May be useful for SQL functions + such as ``func.json_array_elements()``. + + .. versionadded:: 1.4.46 .. seealso:: @@ -282,7 +292,7 @@ def column_valued(self, name=None): """ # noqa: 501 - return self.alias(name=name).column + return self.alias(name=name, joins_implicitly=joins_implicitly).column @property def columns(self): diff --git a/test/sql/test_from_linter.py b/test/sql/test_from_linter.py index 1fa3aff360f..49370b1e67e 100644 --- a/test/sql/test_from_linter.py +++ b/test/sql/test_from_linter.py @@ -165,16 +165,16 @@ def test_lateral_subqueries_ok_do_we_still_find_cartesians(self): assert start is p3 assert froms == {p1} - @testing.combinations( - "render_derived", "alias", None, argnames="additional_transformation" + @testing.variation("additional_transformation", ["alias", "none"]) + @testing.variation("joins_implicitly", [True, False]) + @testing.variation( + "type_", ["table_valued", "table_valued_derived", "column_valued"] ) - @testing.combinations(True, False, argnames="joins_implicitly") - def test_table_valued( - self, - joins_implicitly, - additional_transformation, + def test_fn_valued( + self, joins_implicitly, additional_transformation, type_ ): - """test #7845""" + """test #7845, #9009""" + my_table = table( "tbl", column("id", Integer), @@ -183,25 +183,45 @@ def test_table_valued( sub_dict = my_table.c.data["d"] - tv = func.json_each(sub_dict) + if type_.table_valued or type_.table_valued_derived: + tv = func.json_each(sub_dict) + + tv = tv.table_valued("key", joins_implicitly=joins_implicitly) + + if type_.table_valued_derived: + tv = tv.render_derived(name="tv", with_types=True) + + if additional_transformation.alias: + tv = tv.alias() + + has_key = tv.c.key == "f" + stmt = 
select(my_table.c.id).where(has_key) + elif type_.column_valued: + tv = func.json_array_elements(sub_dict) - tv = tv.table_valued("key", joins_implicitly=joins_implicitly) + if additional_transformation.alias: + tv = tv.alias(joins_implicitly=joins_implicitly).column + else: + tv = tv.column_valued("key", joins_implicitly=joins_implicitly) - if additional_transformation == "render_derived": - tv = tv.render_derived(name="tv", with_types=True) - elif additional_transformation == "alias": - tv = tv.alias() + stmt = select(my_table.c.id, tv) + else: + type_.fail() - has_key = tv.c.key == "f" - stmt = select(my_table.c.id).where(has_key) froms, start = find_unmatching_froms(stmt, my_table) if joins_implicitly: is_(start, None) is_(froms, None) - else: + elif type_.column_valued: + assert start == my_table + assert froms == {tv.scalar_alias} + + elif type_.table_valued or type_.table_valued_derived: assert start == my_table assert froms == {tv} + else: + type_.fail() def test_count_non_eq_comparison_operators(self): query = select(self.a).where(self.a.c.col_a > self.b.c.col_b) From 667e2e6d62dcdff8e08eec6dd76c25815dfc03cd Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 22 Dec 2022 18:14:31 -0500 Subject: [PATCH 482/632] expand out Index if passed to "constraint" Fixed bug where the PostgreSQL :paramref:`_postgresql.OnConflictClause.constraint` parameter would accept an :class:`.Index` object, however would not expand this index out into its individual index expressions, instead rendering its name in an ON CONFLICT ON CONSTRAINT clause, which is not accepted by PostgreSQL; the "constraint name" form only accepts unique or exclude constraint names. The parameter continues to accept the index but now expands it out into its component expressions for the render. 
Fixes: #9023 Change-Id: I6baf243e26bfe578bf3f193c162dd7a623b6ede9 (cherry picked from commit 5cc18bb80077e98418b4a8066c0bc628209f3ada) --- doc/build/changelog/unreleased_14/9023.rst | 12 +++++++ lib/sqlalchemy/dialects/postgresql/dml.py | 2 +- test/dialect/postgresql/test_on_conflict.py | 39 +++++++++++++++++++++ 3 files changed, 52 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/9023.rst diff --git a/doc/build/changelog/unreleased_14/9023.rst b/doc/build/changelog/unreleased_14/9023.rst new file mode 100644 index 00000000000..d17a0cc983c --- /dev/null +++ b/doc/build/changelog/unreleased_14/9023.rst @@ -0,0 +1,12 @@ +.. change:: + :tags: bug, postgresql + :tickets: 9023 + + Fixed bug where the PostgreSQL + :paramref:`_postgresql.OnConflictClause.constraint` parameter would accept + an :class:`.Index` object, however would not expand this index out into its + individual index expressions, instead rendering its name in an ON CONFLICT + ON CONSTRAINT clause, which is not accepted by PostgreSQL; the "constraint + name" form only accepts unique or exclude constraint names. The parameter + continues to accept the index but now expands it out into its component + expressions for the render. 
diff --git a/lib/sqlalchemy/dialects/postgresql/dml.py b/lib/sqlalchemy/dialects/postgresql/dml.py index b483774db3e..e7b126b3eb5 100644 --- a/lib/sqlalchemy/dialects/postgresql/dml.py +++ b/lib/sqlalchemy/dialects/postgresql/dml.py @@ -188,7 +188,7 @@ def __init__(self, constraint=None, index_elements=None, index_where=None): if constraint is not None: if not isinstance(constraint, util.string_types) and isinstance( constraint, - (schema.Index, schema.Constraint, ext.ExcludeConstraint), + (schema.Constraint, ext.ExcludeConstraint), ): constraint = getattr(constraint, "name") or constraint diff --git a/test/dialect/postgresql/test_on_conflict.py b/test/dialect/postgresql/test_on_conflict.py index 508f691c514..ab46342f5fc 100644 --- a/test/dialect/postgresql/test_on_conflict.py +++ b/test/dialect/postgresql/test_on_conflict.py @@ -675,6 +675,45 @@ def test_on_conflict_do_update_exotic_targets_six(self, connection): [(1, "name1", "mail2@gmail.com", "unique_name")], ) + def test_on_conflict_do_update_constraint_can_be_index(self, connection): + """test #9023""" + + users = self.tables.users_xtra + + connection.execute( + insert(users), + dict( + id=1, + name="name1", + login_email="mail1@gmail.com", + lets_index_this="unique_name", + ), + ) + + i = insert(users) + i = i.on_conflict_do_update( + constraint=self.unique_partial_index, + set_=dict( + name=i.excluded.name, login_email=i.excluded.login_email + ), + ) + + connection.execute( + i, + [ + dict( + name="name1", + login_email="mail2@gmail.com", + lets_index_this="unique_name", + ) + ], + ) + + eq_( + connection.execute(users.select()).fetchall(), + [(1, "name1", "mail2@gmail.com", "unique_name")], + ) + def test_on_conflict_do_update_no_row_actually_affected(self, connection): users = self.tables.users_xtra From 4c80fad2968c7a5953809a9c030886a03151a514 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 27 Dec 2022 12:29:38 -0500 Subject: [PATCH 483/632] pass more contextual information to PyWrapper param create 
Fixed issue in lambda SQL feature where the calculated type of a literal value would not take into account the type coercion rules of the "compared to type", leading to a lack of typing information for SQL expressions, such as comparisons to :class:`.JSON` elements and similar. Fixes: #9029 Change-Id: I381c8d7458d98ba762313dee9ec47a9c1881f74a (cherry picked from commit f63d7e33ec785a5ea4fbc77963c537be26b8419b) --- doc/build/changelog/unreleased_14/9029.rst | 8 ++ lib/sqlalchemy/sql/coercions.py | 9 +- lib/sqlalchemy/sql/lambdas.py | 15 +-- regen_callcounts.tox.ini | 10 +- test/profiles.txt | 82 ++++++++--------- test/sql/test_lambdas.py | 102 +++++++++++++++++++++ 6 files changed, 177 insertions(+), 49 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/9029.rst diff --git a/doc/build/changelog/unreleased_14/9029.rst b/doc/build/changelog/unreleased_14/9029.rst new file mode 100644 index 00000000000..38114e9637c --- /dev/null +++ b/doc/build/changelog/unreleased_14/9029.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, sql + :tickets: 9029 + + Fixed issue in lambda SQL feature where the calculated type of a literal + value would not take into account the type coercion rules of the "compared + to type", leading to a lack of typing information for SQL expressions, such + as comparisons to :class:`.JSON` elements and similar. 
diff --git a/lib/sqlalchemy/sql/coercions.py b/lib/sqlalchemy/sql/coercions.py index 8cc73cb5c5a..ede488915ec 100644 --- a/lib/sqlalchemy/sql/coercions.py +++ b/lib/sqlalchemy/sql/coercions.py @@ -141,7 +141,12 @@ def expect( if not isinstance( element, - (elements.ClauseElement, schema.SchemaItem, schema.FetchedValue), + ( + elements.ClauseElement, + schema.SchemaItem, + schema.FetchedValue, + lambdas.PyWrapper, + ), ): resolved = None @@ -190,6 +195,8 @@ def expect( ) else: resolved = element + elif isinstance(element, lambdas.PyWrapper): + resolved = element._sa__py_wrapper_literal(**kw) else: resolved = element if ( diff --git a/lib/sqlalchemy/sql/lambdas.py b/lib/sqlalchemy/sql/lambdas.py index 584efe4c688..236427d9df6 100644 --- a/lib/sqlalchemy/sql/lambdas.py +++ b/lib/sqlalchemy/sql/lambdas.py @@ -18,7 +18,6 @@ from . import roles from . import schema from . import traversals -from . import type_api from . import visitors from .base import _clone from .base import Options @@ -1215,11 +1214,11 @@ def __call__(self, *arg, **kw): return value def operate(self, op, *other, **kwargs): - elem = object.__getattribute__(self, "__clause_element__")() + elem = object.__getattribute__(self, "_py_wrapper_literal")() return op(elem, *other, **kwargs) def reverse_operate(self, op, other, **kwargs): - elem = object.__getattribute__(self, "__clause_element__")() + elem = object.__getattribute__(self, "_py_wrapper_literal")() return op(other, elem, **kwargs) def _extract_bound_parameters(self, starting_point, result_list): @@ -1232,16 +1231,19 @@ def _extract_bound_parameters(self, starting_point, result_list): element = getter(starting_point) pywrapper._sa__extract_bound_parameters(element, result_list) - def __clause_element__(self): + def _py_wrapper_literal(self, expr=None, operator=None, **kw): param = object.__getattribute__(self, "_param") to_evaluate = object.__getattribute__(self, "_to_evaluate") if param is None: name = object.__getattribute__(self, "_name") 
self._param = param = elements.BindParameter( - name, required=False, unique=True + name, + required=False, + unique=True, + _compared_to_operator=operator, + _compared_to_type=expr.type if expr is not None else None, ) self._has_param = True - param.type = type_api._resolve_value_to_type(to_evaluate) return param._with_value(to_evaluate, maintain_key=True) def __bool__(self): @@ -1259,6 +1261,7 @@ def __getattribute__(self, key): "__clause_element__", "operate", "reverse_operate", + "_py_wrapper_literal", "__class__", "__dict__", ): diff --git a/regen_callcounts.tox.ini b/regen_callcounts.tox.ini index 80d88aa4544..0379b1cfe8d 100644 --- a/regen_callcounts.tox.ini +++ b/regen_callcounts.tox.ini @@ -21,7 +21,15 @@ commands= db_{oracle}: {env:BASECOMMAND} {env:ORACLE:} {posargs} db_{mssql}: {env:BASECOMMAND} {env:MSSQL:} {posargs} -passenv=ORACLE_HOME NLS_LANG TOX_POSTGRESQL TOX_MYSQL TOX_ORACLE TOX_MSSQL TOX_SQLITE TOX_WORKERS +passenv= + ORACLE_HOME + NLS_LANG + TOX_POSTGRESQL + TOX_MYSQL + TOX_ORACLE + TOX_MSSQL + TOX_SQLITE + TOX_WORKERS # -E : ignore PYTHON* environment variables (such as PYTHONPATH) # -s : don't add user site directory to sys.path; also PYTHONNOUSERSITE diff --git a/test/profiles.txt b/test/profiles.txt index ab19468b37d..5c285f4f453 100644 --- a/test/profiles.txt +++ b/test/profiles.txt @@ -165,8 +165,8 @@ test.aaa_profiling.test_misc.EnumTest.test_create_enum_from_pep_435_w_expensive_ # TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 46635 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 56845 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 47035 
+test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 57245 test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 50335 test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 61445 test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 50335 @@ -174,8 +174,8 @@ test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_w_annotation x86_6 # TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 45735 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 55945 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 45835 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 56045 test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 49435 test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 60545 test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 49435 @@ -183,8 +183,8 @@ test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_bundle_wo_annotation x86_ # TEST: 
test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 50835 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 58545 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 51135 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 58845 test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 53935 test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 62545 test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 53935 @@ -192,8 +192,8 @@ test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_w_annotations x86_ # TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 49935 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 57645 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 50335 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 58045 test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations 
x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 53035 test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 61645 test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_entity_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 53035 @@ -219,8 +219,8 @@ test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_w_annotations x # TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 44335 -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 52045 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 44435 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 52145 test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 47435 test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 56045 test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_bundle_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 47435 @@ -237,8 +237,8 @@ test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_w_annotations x # TEST: test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations -test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 30905 
-test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 33505 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 31005 +test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 33605 test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 33705 test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 36605 test.aaa_profiling.test_orm.AnnotatedOverheadTest.test_no_entity_wo_annotations x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 33705 @@ -282,8 +282,8 @@ test.aaa_profiling.test_orm.BranchedOptionTest.test_query_opts_unbound_branching # TEST: test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline -test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 15162 -test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 26175 +test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 15246 +test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 26259 test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 15190 test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 27207 test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline 
x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 15190 @@ -291,8 +291,8 @@ test.aaa_profiling.test_orm.DeferOptionsTest.test_baseline x86_64_linux_cpython_ # TEST: test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols -test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 21303 -test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 26316 +test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 21291 +test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 26304 test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 21344 test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 27361 test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 21344 @@ -300,8 +300,8 @@ test.aaa_profiling.test_orm.DeferOptionsTest.test_defer_many_cols x86_64_linux_c # TEST: test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 9853 -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 10003 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 9953 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 10153 
test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 10304 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 10454 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 10304 @@ -318,8 +318,8 @@ test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_aliased_select_join x8 # TEST: test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 4053 -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 4203 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 4153 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 4353 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 4054 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 4204 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 4054 @@ -327,8 +327,8 @@ test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_b_plain x86_64_linux_cpy # TEST: test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 96088 -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 96238 
+test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 99338 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 99738 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 103689 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 103839 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 103689 @@ -336,8 +336,8 @@ test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d x86_64_linux_cpython_3 # TEST: test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 94138 -test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 94288 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 97288 +test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 97688 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 102039 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 102189 test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 102039 @@ -345,8 +345,8 @@ test.aaa_profiling.test_orm.JoinConditionTest.test_a_to_d_aliased x86_64_linux_c # TEST: test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query 
-test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 495703 -test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 497535 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 497722 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 499549 test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 527563 test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 529405 test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 527563 @@ -354,8 +354,8 @@ test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_build_query x86_64_linux_cp # TEST: test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results -test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 425705 -test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 443305 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 425305 +test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 442905 test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 430805 test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 
449905 test.aaa_profiling.test_orm.JoinedEagerLoadTest.test_fetch_results x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 430205 @@ -372,8 +372,8 @@ test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_ # TEST: test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity -test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 103486 -test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 108243 +test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 104575 +test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 109332 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 107759 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 113767 test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_no_identity x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 107759 @@ -381,8 +381,8 @@ test.aaa_profiling.test_orm.LoadManyToOneFromIdentityTest.test_many_to_one_load_ # TEST: test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks -test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 19841 -test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 
20287 +test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 20043 +test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 20497 test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 20731 test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 21299 test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 20731 @@ -390,8 +390,8 @@ test.aaa_profiling.test_orm.MergeBackrefsTest.test_merge_pending_with_all_pks x8 # TEST: test.aaa_profiling.test_orm.MergeTest.test_merge_load -test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 1396 -test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 1436 +test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 1414 +test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 1454 test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 1460 test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 1511 test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 1460 @@ -399,8 +399,8 @@ test.aaa_profiling.test_orm.MergeTest.test_merge_load x86_64_linux_cpython_3.9_s # TEST: test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 
-test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 101,17 -test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 101,17 +test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 107,18 +test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 107,18 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 103,18 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 103,18 test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 103,18 @@ -408,8 +408,8 @@ test.aaa_profiling.test_orm.MergeTest.test_merge_no_load x86_64_linux_cpython_3. 
# TEST: test.aaa_profiling.test_orm.QueryTest.test_query_cols -test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 5842 -test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 6602 +test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 5936 +test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 6696 test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 6150 test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 6940 test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 6150 @@ -417,8 +417,8 @@ test.aaa_profiling.test_orm.QueryTest.test_query_cols x86_64_linux_cpython_3.9_s # TEST: test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results -test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 253005 -test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 271105 +test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 264805 +test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 282905 test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 263605 test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results 
x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 283105 test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64_linux_cpython_3.9_sqlite_pysqlite_dbapiunicode_cextensions 263605 @@ -426,7 +426,7 @@ test.aaa_profiling.test_orm.SelectInEagerLoadTest.test_round_trip_results x86_64 # TEST: test.aaa_profiling.test_orm.SessionTest.test_expire_lots -test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 1141 +test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_cextensions 1158 test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_2.7_sqlite_pysqlite_dbapiunicode_nocextensions 1146 test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_cextensions 1252 test.aaa_profiling.test_orm.SessionTest.test_expire_lots x86_64_linux_cpython_3.10_sqlite_pysqlite_dbapiunicode_nocextensions 1256 diff --git a/test/sql/test_lambdas.py b/test/sql/test_lambdas.py index 29e1258efbf..ede9702010b 100644 --- a/test/sql/test_lambdas.py +++ b/test/sql/test_lambdas.py @@ -31,6 +31,7 @@ from sqlalchemy.types import ARRAY from sqlalchemy.types import Boolean from sqlalchemy.types import Integer +from sqlalchemy.types import JSON from sqlalchemy.types import String @@ -1513,6 +1514,107 @@ def mylambda(): expr, "users.name || :x_1", checkparams={"x_1": "bar"} ) + def test_rhs_type_detection_from_left(self): + """test #9029""" + tt = table("tt", column("q", JSON)) + + x = {"foo": "bar"} + + def mylambda(): + return tt.c.q + x + + expr = coercions.expect(roles.WhereHavingRole, mylambda) + is_(expr._resolved.right.type._type_affinity, JSON) + + def test_rhs_type_detection_standalone(self): + """test related to #9029, as type coercion rule was changed""" + + x = 5 + + def mylambda(): + return x + + expr = coercions.expect(roles.OrderByRole, mylambda) + 
is_(expr._resolved.type._type_affinity, Integer) + + x = "now im a string" + + # stays as int b.c. _resolved is cached + is_(expr._resolved.type._type_affinity, Integer) + + # make a new one! now it will be string + expr = coercions.expect(roles.OrderByRole, mylambda) + is_(expr._resolved.type._type_affinity, String) + + @testing.only_on("sqlite") + @testing.variation("stmt_type", ["lambda_stmt", "lambda_crit"]) + @testing.variation("callable_type", ["none", "closure", "parameter"]) + def test_9029_integration( + self, metadata, connection, stmt_type, callable_type + ): + t = Table( + "t", + metadata, + Column("id", Integer, primary_key=True), + Column("data", JSON), + ) + + t.create(connection) + + connection.execute( + t.insert(), + { + "id": 12, + "data": {"key": "value", "key2": {"subkey": [1, 2, 3]}}, + }, + ) + + d = {"key": "value", "key2": {"subkey": [1, 2, 3]}} + + if callable_type.none: + if stmt_type.lambda_stmt: + stmt = lambda_stmt(lambda: select(t).filter(t.c.data == d)) + elif stmt_type.lambda_crit: + stmt = select(t).filter(lambda: t.c.data == d) + else: + stmt_type.fail() + + to_run = stmt + + elif callable_type.closure: + + def go(): + if stmt_type.lambda_stmt: + stmt = lambda_stmt(lambda: select(t).filter(t.c.data == d)) + elif stmt_type.lambda_crit: + stmt = select(t).filter(lambda: t.c.data == d) + else: + stmt_type.fail() + return stmt + + to_run = go() + + elif callable_type.parameter: + + def go(data): + if stmt_type.lambda_stmt: + stmt = lambda_stmt( + lambda: select(t).filter(t.c.data == data) + ) + elif stmt_type.lambda_crit: + stmt = select(t).filter(lambda: t.c.data == data) + else: + stmt_type.fail() + + return stmt + + to_run = go(d) + + eq_( + connection.execute(to_run).first(), + (12, {"key": "value", "key2": {"subkey": [1, 2, 3]}}), + ) + def test_execute_constructed_uncached(self, user_address_fixture): users, addresses = user_address_fixture From 659562d5ff3aa38505635723a4093040c58d9449 Mon Sep 17 00:00:00 2001 From: Mike Bayer 
Date: Tue, 27 Dec 2022 15:51:06 -0500 Subject: [PATCH 484/632] dont prefix ssl args with "ssl" in the ssl dict Fixes: #9031 Change-Id: I9ef077e7da5b2328a345f6526a6210ce82d807f6 (cherry picked from commit e0ea78bd079c1545ced7c9233f949fabe296cd07) --- lib/sqlalchemy/dialects/mysql/mysqldb.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/sqlalchemy/dialects/mysql/mysqldb.py b/lib/sqlalchemy/dialects/mysql/mysqldb.py index 7a721e8e67e..76d9b67fd66 100644 --- a/lib/sqlalchemy/dialects/mysql/mysqldb.py +++ b/lib/sqlalchemy/dialects/mysql/mysqldb.py @@ -44,9 +44,9 @@ "mysql+mysqldb://scott:tiger@192.168.0.134/test", connect_args={ "ssl": { - "ssl_ca": "/home/gord/client-ssl/ca.pem", - "ssl_cert": "/home/gord/client-ssl/client-cert.pem", - "ssl_key": "/home/gord/client-ssl/client-key.pem" + "ca": "/home/gord/client-ssl/ca.pem", + "cert": "/home/gord/client-ssl/client-cert.pem", + "key": "/home/gord/client-ssl/client-key.pem" } } ) From 9b879cee072e112f43f70c5b42df4577798a6eb5 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Wed, 28 Dec 2022 13:33:53 +0100 Subject: [PATCH 485/632] Correct URL import in docs Fixes: #9032 Change-Id: I2c4164d8e1c74e0c74a578f4629c714b96761ed5 --- doc/build/core/engines.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/core/engines.rst b/doc/build/core/engines.rst index ba800d69830..b0dc2a3cf59 100644 --- a/doc/build/core/engines.rst +++ b/doc/build/core/engines.rst @@ -130,7 +130,7 @@ The :class:`.URL` object is created using the :meth:`_engine.URL.create()` constructor method, passing all fields individually. 
Special characters such as those within passwords may be passed without any modification:: - from sqlalchemy import URL + from sqlalchemy.engine import URL url_object = URL.create( "postgresql+pg8000", From 510caee2e68d8665577d67bbc4afda7bbca31f9f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 28 Dec 2022 12:04:07 -0500 Subject: [PATCH 486/632] ensure whereclause, returning copied as tuples Fixed issue in the internal SQL traversal for DML statements like :class:`_dml.Update` and :class:`_dml.Delete` which would cause among other potential issues, a specific issue using lambda statements with the ORM update/delete feature. Fixes: #9033 Change-Id: I76428049cb767ba302fbea89555114bf63ab8687 (cherry picked from commit e68173bf7d296b2948abed06f79c7cbd0ab66b0d) --- doc/build/changelog/unreleased_14/9033.rst | 8 +++++ lib/sqlalchemy/sql/dml.py | 14 ++++---- test/orm/test_update_delete.py | 34 +++++++++++++----- test/sql/test_external_traversal.py | 40 +++++++++++++++++++++- 4 files changed, 79 insertions(+), 17 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/9033.rst diff --git a/doc/build/changelog/unreleased_14/9033.rst b/doc/build/changelog/unreleased_14/9033.rst new file mode 100644 index 00000000000..d0b0d2f3fec --- /dev/null +++ b/doc/build/changelog/unreleased_14/9033.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, orm + :tickets: 9033 + + Fixed issue in the internal SQL traversal for DML statements like + :class:`_dml.Update` and :class:`_dml.Delete` which would cause among other + potential issues, a specific issue using lambda statements with the ORM + update/delete feature. 
diff --git a/lib/sqlalchemy/sql/dml.py b/lib/sqlalchemy/sql/dml.py index 07a4d7b2d58..ae48740000e 100644 --- a/lib/sqlalchemy/sql/dml.py +++ b/lib/sqlalchemy/sql/dml.py @@ -928,12 +928,12 @@ class Insert(ValuesBase): ("_multi_values", InternalTraversal.dp_dml_multi_values), ("select", InternalTraversal.dp_clauseelement), ("_post_values_clause", InternalTraversal.dp_clauseelement), - ("_returning", InternalTraversal.dp_clauseelement_list), + ("_returning", InternalTraversal.dp_clauseelement_tuple), ("_hints", InternalTraversal.dp_table_hint_list), ("_return_defaults", InternalTraversal.dp_boolean), ( "_return_defaults_columns", - InternalTraversal.dp_clauseelement_list, + InternalTraversal.dp_clauseelement_tuple, ), ] + HasPrefixes._has_prefixes_traverse_internals @@ -1208,16 +1208,16 @@ class Update(DMLWhereBase, ValuesBase): _traverse_internals = ( [ ("table", InternalTraversal.dp_clauseelement), - ("_where_criteria", InternalTraversal.dp_clauseelement_list), + ("_where_criteria", InternalTraversal.dp_clauseelement_tuple), ("_inline", InternalTraversal.dp_boolean), ("_ordered_values", InternalTraversal.dp_dml_ordered_values), ("_values", InternalTraversal.dp_dml_values), - ("_returning", InternalTraversal.dp_clauseelement_list), + ("_returning", InternalTraversal.dp_clauseelement_tuple), ("_hints", InternalTraversal.dp_table_hint_list), ("_return_defaults", InternalTraversal.dp_boolean), ( "_return_defaults_columns", - InternalTraversal.dp_clauseelement_list, + InternalTraversal.dp_clauseelement_tuple, ), ] + HasPrefixes._has_prefixes_traverse_internals @@ -1436,8 +1436,8 @@ class Delete(DMLWhereBase, UpdateBase): _traverse_internals = ( [ ("table", InternalTraversal.dp_clauseelement), - ("_where_criteria", InternalTraversal.dp_clauseelement_list), - ("_returning", InternalTraversal.dp_clauseelement_list), + ("_where_criteria", InternalTraversal.dp_clauseelement_tuple), + ("_returning", InternalTraversal.dp_clauseelement_tuple), ("_hints", 
InternalTraversal.dp_table_hint_list), ] + HasPrefixes._has_prefixes_traverse_internals diff --git a/test/orm/test_update_delete.py b/test/orm/test_update_delete.py index 6be271e4603..9eaf1765a31 100644 --- a/test/orm/test_update_delete.py +++ b/test/orm/test_update_delete.py @@ -701,7 +701,8 @@ def test_update_future(self): list(zip([15, 27, 19, 27])), ) - def test_update_future_lambda(self): + @testing.variation("values_first", [True, False]) + def test_update_future_lambda(self, values_first): User, users = self.classes.User, self.tables.users sess = Session(testing.db, future=True) @@ -710,14 +711,22 @@ def test_update_future_lambda(self): sess.execute(select(User).order_by(User.id)).scalars().all() ) - sess.execute( - lambda_stmt( + new_value = 10 + + if values_first: + stmt = lambda_stmt(lambda: update(User)) + stmt += lambda s: s.values({"age": User.age - new_value}) + stmt += lambda s: s.where(User.age > 29).execution_options( + synchronize_session="evaluate" + ) + else: + stmt = lambda_stmt( lambda: update(User) .where(User.age > 29) - .values({"age": User.age - 10}) + .values({"age": User.age - new_value}) .execution_options(synchronize_session="evaluate") - ), - ) + ) + sess.execute(stmt) eq_([john.age, jack.age, jill.age, jane.age], [25, 37, 29, 27]) eq_( @@ -725,14 +734,21 @@ def test_update_future_lambda(self): list(zip([25, 37, 29, 27])), ) - sess.execute( - lambda_stmt( + if values_first: + stmt = lambda_stmt(lambda: update(User)) + stmt += lambda s: s.values({"age": User.age - new_value}) + stmt += lambda s: s.where(User.age > 29).execution_options( + synchronize_session="evaluate" + ) + else: + stmt = lambda_stmt( lambda: update(User) .where(User.age > 29) .values({User.age: User.age - 10}) .execution_options(synchronize_session="evaluate") ) - ) + + sess.execute(stmt) eq_([john.age, jack.age, jill.age, jane.age], [25, 27, 29, 27]) eq_( sess.query(User.age).order_by(User.id).all(), diff --git a/test/sql/test_external_traversal.py 
b/test/sql/test_external_traversal.py index 37363273b20..7a058bfcdae 100644 --- a/test/sql/test_external_traversal.py +++ b/test/sql/test_external_traversal.py @@ -2693,7 +2693,7 @@ class ValuesBaseTest(fixtures.TestBase, AssertsCompiledSQL): """Tests the generative capability of Insert, Update""" - __dialect__ = "default" + __dialect__ = "default_enhanced" # fixme: consolidate converage from elsewhere here and expand @@ -2935,3 +2935,41 @@ def test_update_no_support_multi_constructor(self): "UPDATE construct does not support multiple parameter sets.", stmt.compile, ) + + @testing.variation("stmt_type", ["update", "delete"]) + def test_whereclause_returning_adapted(self, stmt_type): + """test #9033""" + + if stmt_type.update: + stmt = ( + t1.update() + .where(t1.c.col1 == 10) + .values(col1=15) + .returning(t1.c.col1) + ) + elif stmt_type.delete: + stmt = t1.delete().where(t1.c.col1 == 10).returning(t1.c.col1) + else: + stmt_type.fail() + + stmt = visitors.replacement_traverse(stmt, {}, lambda elem: None) + + assert isinstance(stmt._where_criteria, tuple) + assert isinstance(stmt._returning, tuple) + + stmt = stmt.where(t1.c.col2 == 5).returning(t1.c.col2) + + if stmt_type.update: + self.assert_compile( + stmt, + "UPDATE table1 SET col1=:col1 WHERE table1.col1 = :col1_1 " + "AND table1.col2 = :col2_1 RETURNING table1.col1, table1.col2", + ) + elif stmt_type.delete: + self.assert_compile( + stmt, + "DELETE FROM table1 WHERE table1.col1 = :col1_1 " + "AND table1.col2 = :col2_1 RETURNING table1.col1, table1.col2", + ) + else: + stmt_type.fail() From afcbc3233b83d429e1314355d9f0c343419047b8 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 28 Dec 2022 15:30:32 -0500 Subject: [PATCH 487/632] changelog adjustments include backport markings Change-Id: I810923a641977569b8b4d9967e84b8cb684e7a52 (cherry picked from commit 46fe3e53e06864cafbbcbfd8a82a2ec00727b8c5) --- doc/build/changelog/changelog_14.rst | 8 ++++++++ doc/build/changelog/unreleased_14/8974.rst | 3 ++- 
doc/build/changelog/unreleased_14/8995.rst | 3 ++- doc/build/changelog/unreleased_14/9002.rst | 1 + doc/build/changelog/unreleased_14/9023.rst | 1 + doc/build/changelog/unreleased_14/9029.rst | 3 ++- doc/build/changelog/unreleased_14/9033.rst | 1 + doc/build/changelog/unreleased_14/tox_fix.rst | 1 + 8 files changed, 18 insertions(+), 3 deletions(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index ef40bcc3704..790f8a8440a 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -24,6 +24,7 @@ This document details individual issue-level changes made throughout .. change:: :tags: bug, orm :tickets: 8862 + :versions: 2.0.0rc1 Fixed bug where :meth:`_orm.Session.merge` would fail to preserve the current loaded contents of relationship attributes that were indicated with @@ -49,6 +50,7 @@ This document details individual issue-level changes made throughout .. change:: :tags: bug, orm :tickets: 8881 + :versions: 2.0.0rc1 Fixed issues in :func:`_orm.with_expression` where expressions that were composed of columns that were referenced from the enclosing SELECT would @@ -92,6 +94,7 @@ This document details individual issue-level changes made throughout .. change:: :tags: bug, postgresql :tickets: 8748 + :versions: 2.0.0rc1 Made an adjustment to how the PostgreSQL dialect considers column types when it reflects columns from a table, to accommodate for alternative @@ -100,6 +103,7 @@ This document details individual issue-level changes made throughout .. change:: :tags: usecase, sqlite :tickets: 8903 + :versions: 2.0.0rc1 Added support for the SQLite backend to reflect the "DEFERRABLE" and "INITIALLY" keywords which may be present on a foreign key construct. Pull @@ -108,6 +112,7 @@ This document details individual issue-level changes made throughout .. 
change:: :tags: usecase, sql :tickets: 8800 + :versions: 2.0.0rc1 An informative re-raise is now thrown in the case where any "literal bindparam" render operation fails, indicating the value itself and @@ -117,6 +122,7 @@ This document details individual issue-level changes made throughout .. change:: :tags: usecase, sqlite :tickets: 8804 + :versions: 2.0.0rc1 Added support for reflection of expression-oriented WHERE criteria included in indexes on the SQLite dialect, in a manner similar to that of the @@ -125,6 +131,7 @@ This document details individual issue-level changes made throughout .. change:: :tags: bug, sql :tickets: 8827 + :versions: 2.0.0rc1 Fixed a series of issues regarding the position and sometimes the identity of rendered bound parameters, such as those used for SQLite, asyncpg, @@ -147,6 +154,7 @@ This document details individual issue-level changes made throughout .. change:: :tags: bug, engine :tickets: 8963 + :versions: 2.0.0rc1 Fixed issue where :meth:`_engine.Result.freeze` method would not work for textual SQL using either :func:`_sql.text` or diff --git a/doc/build/changelog/unreleased_14/8974.rst b/doc/build/changelog/unreleased_14/8974.rst index 6400c95b452..a8ae491ad19 100644 --- a/doc/build/changelog/unreleased_14/8974.rst +++ b/doc/build/changelog/unreleased_14/8974.rst @@ -1,6 +1,7 @@ .. change:: - :tags: bug, pool + :tags: bug, engine :tickets: 8974 + :versions: 2.0.0rc1 Fixed a long-standing race condition in the connection pool which could occur under eventlet/gevent monkeypatching schemes in conjunction with the diff --git a/doc/build/changelog/unreleased_14/8995.rst b/doc/build/changelog/unreleased_14/8995.rst index 5191b58de9a..034e7b3f7cd 100644 --- a/doc/build/changelog/unreleased_14/8995.rst +++ b/doc/build/changelog/unreleased_14/8995.rst @@ -1,6 +1,7 @@ .. 
change:: - :tags: bug, installation + :tags: bug, general :tickets: 8995 + :versions: 2.0.0rc1 Fixed regression where the base compat module was calling upon ``platform.architecture()`` in order to detect some system properties, diff --git a/doc/build/changelog/unreleased_14/9002.rst b/doc/build/changelog/unreleased_14/9002.rst index c2d3f01b9f8..61d491410d4 100644 --- a/doc/build/changelog/unreleased_14/9002.rst +++ b/doc/build/changelog/unreleased_14/9002.rst @@ -1,6 +1,7 @@ .. change:: :tags: bug, tests :tickets: 9002 + :versions: 2.0.0rc1 Added new exclusion rule for third party dialects called ``unusual_column_name_characters``, which can be "closed" for third party diff --git a/doc/build/changelog/unreleased_14/9023.rst b/doc/build/changelog/unreleased_14/9023.rst index d17a0cc983c..3bbbd0f3fbe 100644 --- a/doc/build/changelog/unreleased_14/9023.rst +++ b/doc/build/changelog/unreleased_14/9023.rst @@ -1,6 +1,7 @@ .. change:: :tags: bug, postgresql :tickets: 9023 + :versions: 2.0.0rc1 Fixed bug where the PostgreSQL :paramref:`_postgresql.OnConflictClause.constraint` parameter would accept diff --git a/doc/build/changelog/unreleased_14/9029.rst b/doc/build/changelog/unreleased_14/9029.rst index 38114e9637c..dd3c24ee4cb 100644 --- a/doc/build/changelog/unreleased_14/9029.rst +++ b/doc/build/changelog/unreleased_14/9029.rst @@ -1,8 +1,9 @@ .. change:: :tags: bug, sql :tickets: 9029 + :versions: 2.0.0rc1 Fixed issue in lambda SQL feature where the calculated type of a literal value would not take into account the type coercion rules of the "compared to type", leading to a lack of typing information for SQL expressions, such - as comparisons to :class:`.JSON` elements and similar. + as comparisons to :class:`_types.JSON` elements and similar. 
diff --git a/doc/build/changelog/unreleased_14/9033.rst b/doc/build/changelog/unreleased_14/9033.rst index d0b0d2f3fec..e0e39db3887 100644 --- a/doc/build/changelog/unreleased_14/9033.rst +++ b/doc/build/changelog/unreleased_14/9033.rst @@ -1,6 +1,7 @@ .. change:: :tags: bug, orm :tickets: 9033 + :versions: 2.0.0rc1 Fixed issue in the internal SQL traversal for DML statements like :class:`_dml.Update` and :class:`_dml.Delete` which would cause among other diff --git a/doc/build/changelog/unreleased_14/tox_fix.rst b/doc/build/changelog/unreleased_14/tox_fix.rst index f37829fc370..7d9b799e01d 100644 --- a/doc/build/changelog/unreleased_14/tox_fix.rst +++ b/doc/build/changelog/unreleased_14/tox_fix.rst @@ -1,5 +1,6 @@ .. change:: :tags: bug, tests + :versions: 2.0.0rc1 Fixed issue in tox.ini file where changes in the tox 4.0 series to the format of "passenv" caused tox to not function correctly, in particular From 74becd0d35005ed3cdfea11da247568e9ec604dc Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 28 Dec 2022 15:53:03 -0500 Subject: [PATCH 488/632] note that 2.0 has behavioral changes outside of the 1.4->2.0 process Change-Id: I0ab9611c75f592acec73ca92271f970eae74d7ab (cherry picked from commit 2c1550af60e5fc8303990519b3ae497401196348) --- doc/build/changelog/migration_20.rst | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/doc/build/changelog/migration_20.rst b/doc/build/changelog/migration_20.rst index f7009a28d57..15e6edfa3b2 100644 --- a/doc/build/changelog/migration_20.rst +++ b/doc/build/changelog/migration_20.rst @@ -48,9 +48,12 @@ new ORM declarative system that unifies classical and declarative mapping, support for Python dataclasses, and asyncio support for Core and ORM. 
The steps to achieve 2.0 migration are in the following subsections; overall, -the general strategy is that once an application runs on 1.4 with all -warning flags turned on and does not emit any 2.0-deprecation warnings, it is -now cross-compatible with SQLAlchemy 2.0. +the general strategy is that once an application runs on 1.4 with all warning +flags turned on and does not emit any 2.0-deprecation warnings, it is now +**mostly** cross-compatible with SQLAlchemy 2.0. **Please note there may be +additional API and behavioral changes that may behave differently when running +against SQLAlchemy 2.0; always test code against an actual SQLAlchemy 2.0 +release as the final step in migrating**. First Prerequisite, step one - A Working 1.3 Application @@ -468,6 +471,18 @@ that descend from ``Base``:: that don't use :class:`_orm.Mapped` to remain usable. +.. _migration_20_step_seven: + +Migration to 2.0 Step Seven - Test against a SQLAlchemy 2.0 Release +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +As mentioned previously, SQLAlchemy 2.0 has additional API and behavioral +changes that are intended to be backwards compatible, however may introduce +some incompatibilities nonetheless. Therefore after the overall porting +process is complete, the final step is to test against the most recent release +of SQLAlchemy 2.0 to correct for any remaining issues that might be present. + + 2.0 Migration - Core Connection / Transaction ============================================= From 532373b18f2e77910bb642a27a2cca3179499389 Mon Sep 17 00:00:00 2001 From: Lele Gaifax Date: Thu, 29 Dec 2022 10:21:09 +0100 Subject: [PATCH 489/632] Rectify reference to class LambdaElement, misnamed as LamdaElement (#9037) Fixes #9034. 
--- doc/build/core/connections.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/core/connections.rst b/doc/build/core/connections.rst index 8c3039df0e6..f08b592d28c 100644 --- a/doc/build/core/connections.rst +++ b/doc/build/core/connections.rst @@ -1584,7 +1584,7 @@ Quick Guidelines for Lambdas Above all, the emphasis within the lambda SQL system is ensuring that there is never a mismatch between the cache key generated for a lambda and the -SQL string it will produce. The :class:`_sql.LamdaElement` and related +SQL string it will produce. The :class:`_sql.LambdaElement` and related objects will run and analyze the given lambda in order to calculate how it should be cached on each run, trying to detect any potential problems. Basic guidelines include: From 4f7b6a095590e1216c44935e7a7ba725733ca8d5 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 2 Jan 2023 12:27:36 -0500 Subject: [PATCH 490/632] add uber warning for 1.4 As we don't have any automatic deprecation warning for 2.0 unless SQLALCHEMY_WARN_20 is set, applications that are not being monitored for deprecations have no way to guard against 2.0 being released on pypi unless they add a requirements rule. make sure we are putting out a major warning for people who may have not noticed that SQLAlchemy 2.0 will break compatibility with legacy use patterns. 
Fixes: #8983 Change-Id: I7d50db52c9a0fe3165b0131aab2fce9af80d51dd --- lib/sqlalchemy/testing/profiling.py | 2 + lib/sqlalchemy/util/deprecations.py | 66 +++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+) diff --git a/lib/sqlalchemy/testing/profiling.py b/lib/sqlalchemy/testing/profiling.py index 41326303afb..8c4d9a48412 100644 --- a/lib/sqlalchemy/testing/profiling.py +++ b/lib/sqlalchemy/testing/profiling.py @@ -251,6 +251,8 @@ def wrap(fn, *args, **kw): with mock.patch.object( deprecations, "SQLALCHEMY_WARN_20", False + ), mock.patch.object( + deprecations, "SILENCE_UBER_WARNING", True ), mock.patch.object( row.LegacyRow, "_default_key_style", row.KEY_OBJECTS_NO_WARN ): diff --git a/lib/sqlalchemy/util/deprecations.py b/lib/sqlalchemy/util/deprecations.py index b61516d85c4..ca346ee0e2c 100644 --- a/lib/sqlalchemy/util/deprecations.py +++ b/lib/sqlalchemy/util/deprecations.py @@ -10,6 +10,7 @@ import os import re +import sys from . import compat from .langhelpers import _hash_limit_string @@ -22,15 +23,29 @@ SQLALCHEMY_WARN_20 = False +SILENCE_UBER_WARNING = False + if os.getenv("SQLALCHEMY_WARN_20", "false").lower() in ("true", "yes", "1"): SQLALCHEMY_WARN_20 = True +if compat.py2k: + SILENCE_UBER_WARNING = True +elif os.getenv("SQLALCHEMY_SILENCE_UBER_WARNING", "false").lower() in ( + "true", + "yes", + "1", +): + SILENCE_UBER_WARNING = True + def _warn_with_version(msg, version, type_, stacklevel, code=None): if ( issubclass(type_, exc.Base20DeprecationWarning) and not SQLALCHEMY_WARN_20 ): + if not SILENCE_UBER_WARNING: + _emit_uber_warning(type_, stacklevel) + return warn = type_(msg, code=code) @@ -39,6 +54,57 @@ def _warn_with_version(msg, version, type_, stacklevel, code=None): _warnings_warn(warn, stacklevel=stacklevel + 1) +def _emit_uber_warning(type_, stacklevel): + global SILENCE_UBER_WARNING + + if SILENCE_UBER_WARNING: + return + + SILENCE_UBER_WARNING = True + + file_ = sys.stderr + + # source: 
https://github.com/pytest-dev/pytest/blob/326ae0cd88f5e954c8effc2b0c986832e9caff11/src/_pytest/_io/terminalwriter.py#L35-L37 # noqa: E501 + use_color = ( + hasattr(file_, "isatty") + and file_.isatty() + and os.environ.get("TERM") != "dumb" + ) + + msg = ( + "%(red)sDeprecated API features detected! " + "These feature(s) are not compatible with SQLAlchemy 2.0. " + "%(green)sTo prevent incompatible upgrades prior to updating " + "applications, ensure requirements files are " + 'pinned to "sqlalchemy<2.0". ' + "%(cyan)sSet environment variable SQLALCHEMY_WARN_20=1 to show all " + "deprecation warnings. Set environment variable " + "SQLALCHEMY_SILENCE_UBER_WARNING=1 to silence this message.%(nocolor)s" + ) + + if use_color: + msg = msg % { + "red": "\x1b[31m", + "cyan": "\x1b[36m", + "green": "\x1b[32m", + "magenta": "\x1b[35m", + "nocolor": "\x1b[0m", + } + else: + msg = msg % { + "red": "", + "cyan": "", + "green": "", + "magenta": "", + "nocolor": "", + } + + # note this is a exc.Base20DeprecationWarning subclass, which + # will implicitly add the link to the SQLAlchemy 2.0 page in the message + warn = type_(msg) + _warnings_warn(warn, stacklevel=stacklevel + 1) + + def warn_deprecated(msg, version, stacklevel=3, code=None): _warn_with_version( msg, version, exc.SADeprecationWarning, stacklevel, code=code From cba29d19c0b0d72b924b590eaaf886f1afd5ac3c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 2 Jan 2023 23:58:06 -0500 Subject: [PATCH 491/632] repair underline levels in 20 migration doc two sections got whacked here, update underlines to be the same as the 2.0 branch Change-Id: Ic36d51ad9a2cbe23482f72e73f4cbbcbe1617780 --- doc/build/changelog/migration_20.rst | 80 ++++++++++++++-------------- 1 file changed, 40 insertions(+), 40 deletions(-) diff --git a/doc/build/changelog/migration_20.rst b/doc/build/changelog/migration_20.rst index 15e6edfa3b2..7e567cbe60b 100644 --- a/doc/build/changelog/migration_20.rst +++ b/doc/build/changelog/migration_20.rst @@ 
-1,8 +1,8 @@ .. _migration_20_toplevel: -============================= +=========================== Migrating to SQLAlchemy 2.0 -============================= +=========================== .. admonition:: About this document @@ -30,7 +30,7 @@ Migrating to SQLAlchemy 2.0 Overview -======== +-------- The SQLAlchemy 2.0 transition presents itself in the SQLAlchemy 1.4 release as a series of steps that allow an application of any size or complexity to be @@ -57,7 +57,7 @@ release as the final step in migrating**. First Prerequisite, step one - A Working 1.3 Application ---------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The first step is getting an existing application onto 1.4, in the case of a typical non trivial application, is to ensure it runs on SQLAlchemy 1.3 with @@ -73,7 +73,7 @@ warnings; these are warnings emitted for the :class:`_exc.SADeprecationWarning` class. First Prerequisite, step two - A Working 1.4 Application --------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Once the application is good to go on SQLAlchemy 1.3, the next step is to get it running on SQLAlchemy 1.4. In the vast majority of cases, applications @@ -121,7 +121,7 @@ For the full overview of SQLAlchemy 1.4 changes, see the :doc:`/changelog/migration_14` document. Migration to 2.0 Step One - Python 3 only (Python 3.7 minimum for 2.0 compatibility) ------------------------------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SQLAlchemy 2.0 was first inspired by the fact that Python 2's EOL was in 2020. SQLAlchemy is taking a longer period of time than other major projects to drop @@ -134,7 +134,7 @@ application can remain running on Python 2.7 or on at least Python 3.6. Version .. 
_migration_20_deprecations_mode: Migration to 2.0 Step Two - Turn on RemovedIn20Warnings -------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SQLAlchemy 1.4 features a conditional deprecation warning system inspired by the Python "-3" flag that would indicate legacy patterns in a running @@ -238,7 +238,7 @@ on is then ready to run in SQLAlchemy 2.0. Migration to 2.0 Step Three - Resolve all RemovedIn20Warnings --------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Code can be developed iteratively to resolve these warnings. Within the SQLAlchemy project itself, the approach taken is as follows: @@ -284,7 +284,7 @@ the SQLAlchemy project itself, the approach taken is as follows: 4. Once no more warnings are emitted, the filter can be removed. Migration to 2.0 Step Four - Use the ``future`` flag on Engine --------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The :class:`_engine.Engine` object features an updated transaction-level API in version 2.0. In 1.4, this new API is available @@ -322,7 +322,7 @@ The new engine is described at :class:`_future.Engine` which delivers a new conn.commit() # commit as you go Migration to 2.0 Step Five - Use the ``future`` flag on Session ---------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The :class:`_orm.Session` object also features an updated transaction/connection level API in version 2.0. This API is available in 1.4 using the @@ -384,7 +384,7 @@ major API modifications. .. 
_migration_20_step_six: Migration to 2.0 Step Six - Add ``__allow_unmapped__`` to explicitly typed ORM models --------------------------------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SQLAlchemy 2.0 has new support for runtime interpretation of :pep:`484` typing annotations on ORM models. A requirement of these annotations is that they must make use @@ -474,7 +474,7 @@ that descend from ``Base``:: .. _migration_20_step_seven: Migration to 2.0 Step Seven - Test against a SQLAlchemy 2.0 Release -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As mentioned previously, SQLAlchemy 2.0 has additional API and behavioral changes that are intended to be backwards compatible, however may introduce @@ -484,13 +484,13 @@ of SQLAlchemy 2.0 to correct for any remaining issues that might be present. 2.0 Migration - Core Connection / Transaction -============================================= +--------------------------------------------- .. _migration_20_autocommit: Library-level (but not driver level) "Autocommit" removed from both Core and ORM --------------------------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -639,7 +639,7 @@ is turned on. .. 
_migration_20_implicit_execution: "Implicit" and "Connectionless" execution, "bound metadata" removed --------------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -829,7 +829,7 @@ in the case that the operation is a write operation:: result = conn.execute(stmt) execute() method more strict, execution options are more prominent -------------------------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -904,7 +904,7 @@ given. .. _migration_20_result_rows: Result rows act like named tuples ---------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -975,12 +975,12 @@ or attribute:: 2.0 Migration - Core Usage -============================= +-------------------------- .. _migration_20_5284: select() no longer accepts varied constructor arguments, columns are passed positionally ------------------------------------------------------------------------------------------ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **synopsis** @@ -1065,7 +1065,7 @@ Examples of "structural" vs. "data" elements are as follows:: :ref:`error_c9ae` insert/update/delete DML no longer accept keyword constructor arguments ------------------------------------------------------------------------ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -1118,10 +1118,10 @@ manner as that of the :func:`_sql.select` construct. 2.0 Migration - ORM Configuration -============================================= +--------------------------------- Declarative becomes a first class API -------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -1162,7 +1162,7 @@ at :ref:`change_5508`. 
The original "mapper()" function now a core element of Declarative, renamed ----------------------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -1241,7 +1241,7 @@ declarative decorator and classical mapping forms. Declarative, classical mapping, dataclasses, attrs, etc. 2.0 Migration - ORM Usage -============================================= +------------------------- The biggest visible change in SQLAlchemy 2.0 is the use of :meth:`_orm.Session.execute` in conjunction with :func:`_sql.select` to run ORM @@ -1464,7 +1464,7 @@ following the table, and may include additional notes not summarized here. .. _migration_20_unify_select: ORM Query Unified with Core Select ----------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -1623,7 +1623,7 @@ the majority of this ORM logic is also cached. .. _migration_20_get_to_session: ORM Query - get() method moves to Session ------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -1659,7 +1659,7 @@ with writing a SQL query. .. _migration_20_orm_query_join_strings: ORM Query - Joining / loading on relationships uses attributes, not strings ----------------------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -1713,7 +1713,7 @@ more potentially compatible with IDEs and pep-484 integrations. ORM Query - Chaining using lists of attributes, rather than individual calls, removed -------------------------------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -1751,7 +1751,7 @@ interface of methods such as :meth:`_sql.Select.join`. .. 
_migration_20_query_join_options: ORM Query - join(..., aliased=True), from_joinpoint removed ------------------------------------------------------------ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -1806,7 +1806,7 @@ construct itself didn't exist early on. .. _migration_20_query_distinct: Using DISTINCT with additional columns, but only select the entity -------------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -1862,7 +1862,7 @@ without inconvenience. .. _migration_20_query_from_self: Selecting from the query itself as a subquery, e.g. "from_self()" -------------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -1966,7 +1966,7 @@ The above query will disambiguate the ``.id`` column of ``User`` and :ticket:`5221` Selecting entities from alternative selectables; Query.select_entity_from() ---------------------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -2018,7 +2018,7 @@ of view as well as how the internals of the SQLAlchemy ORM must handle it. .. _joinedload_not_uniqued: ORM Rows not uniquified by default ----------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -2079,7 +2079,7 @@ and should be preferred. .. _migration_20_dynamic_loaders: Making use of "dynamic" relationship loads without using Query ---------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -2137,7 +2137,7 @@ uses the ``.statement`` attribute, such as .. 
_migration_20_session_autocommit: Autocommit mode removed from Session; autobegin support added -------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -2188,7 +2188,7 @@ as well as to allow the use of "subtransactions", which are also removed in .. _migration_20_session_subtransaction: Session "subtransaction" behavior removed ------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Synopsis** @@ -2270,10 +2270,10 @@ operations are performed with a single begin/commit pair. 2.0 Migration - ORM Extension and Recipe Changes -================================================ +------------------------------------------------ Dogpile cache recipe and Horizontal Sharding uses new Session API ------------------------------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As the :class:`_orm.Query` object becomes legacy, these two recipes which previously relied upon subclassing of the :class:`_orm.Query` @@ -2284,7 +2284,7 @@ an example. Baked Query Extension Superseded by built-in caching ------------------------------------------------------ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The baked query extension is superseded by the built in caching system and is no longer used by the ORM internals. @@ -2294,7 +2294,7 @@ See :ref:`sql_caching` for full background on the new caching system. Asyncio Support -===================== +--------------- SQLAlchemy 1.4 includes asyncio support for both Core and ORM. The new API exclusively makes use of the "future" patterns noted above. 
From 90457077c23a45e505b65224e2ca30704a7e95dd Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 3 Jan 2023 09:48:06 -0500 Subject: [PATCH 492/632] link to with_variant() narrative from API doc Change-Id: I5d8e4d7cb7871bedebe0fe89758be441e64b94c6 (cherry picked from commit 7f86be997eea27bc994b25c4fb65d72b3d4567f9) --- doc/build/core/type_basics.rst | 2 ++ lib/sqlalchemy/sql/type_api.py | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/doc/build/core/type_basics.rst b/doc/build/core/type_basics.rst index 62d941e6639..ed4d928e6d0 100644 --- a/doc/build/core/type_basics.rst +++ b/doc/build/core/type_basics.rst @@ -127,6 +127,8 @@ API documentation for backend-specific types are in the dialect-specific documentation, listed at :ref:`dialect_toplevel`. +.. _types_with_variant: + Using "UPPERCASE" and Backend-specific types for multiple backends ------------------------------------------------------------------ diff --git a/lib/sqlalchemy/sql/type_api.py b/lib/sqlalchemy/sql/type_api.py index 30fc4189bba..8c786968e01 100644 --- a/lib/sqlalchemy/sql/type_api.py +++ b/lib/sqlalchemy/sql/type_api.py @@ -543,6 +543,11 @@ def with_variant(self, type_, dialect_name): :param dialect_name: base name of the dialect which uses this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.) + .. seealso:: + + :ref:`types_with_variant` - illustrates the use of + :meth:`_types.TypeEngine.with_variant`. 
+ """ return Variant(self, {dialect_name: to_instance(type_)}) From 4f5970525403c98d46ed1a60f7816bc413a8c523 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 3 Jan 2023 12:50:29 -0500 Subject: [PATCH 493/632] happy new year 2023 Change-Id: I14db8e9c69a832b0f5dae8036db3c0a70bb49edd --- LICENSE | 2 +- doc/build/conf.py | 2 +- doc/build/copyright.rst | 2 +- lib/sqlalchemy/__init__.py | 2 +- lib/sqlalchemy/cextension/immutabledict.c | 2 +- lib/sqlalchemy/cextension/processors.c | 2 +- lib/sqlalchemy/cextension/resultproxy.c | 2 +- lib/sqlalchemy/connectors/__init__.py | 2 +- lib/sqlalchemy/connectors/mxodbc.py | 2 +- lib/sqlalchemy/connectors/pyodbc.py | 2 +- lib/sqlalchemy/databases/__init__.py | 2 +- lib/sqlalchemy/dialects/__init__.py | 2 +- lib/sqlalchemy/dialects/firebird/__init__.py | 2 +- lib/sqlalchemy/dialects/firebird/base.py | 2 +- lib/sqlalchemy/dialects/firebird/fdb.py | 2 +- lib/sqlalchemy/dialects/firebird/kinterbasdb.py | 2 +- lib/sqlalchemy/dialects/mssql/__init__.py | 2 +- lib/sqlalchemy/dialects/mssql/base.py | 2 +- lib/sqlalchemy/dialects/mssql/information_schema.py | 2 +- lib/sqlalchemy/dialects/mssql/mxodbc.py | 2 +- lib/sqlalchemy/dialects/mssql/pymssql.py | 2 +- lib/sqlalchemy/dialects/mssql/pyodbc.py | 2 +- lib/sqlalchemy/dialects/mysql/__init__.py | 2 +- lib/sqlalchemy/dialects/mysql/aiomysql.py | 2 +- lib/sqlalchemy/dialects/mysql/asyncmy.py | 2 +- lib/sqlalchemy/dialects/mysql/base.py | 2 +- lib/sqlalchemy/dialects/mysql/cymysql.py | 2 +- lib/sqlalchemy/dialects/mysql/enumerated.py | 2 +- lib/sqlalchemy/dialects/mysql/json.py | 2 +- lib/sqlalchemy/dialects/mysql/mariadbconnector.py | 2 +- lib/sqlalchemy/dialects/mysql/mysqlconnector.py | 2 +- lib/sqlalchemy/dialects/mysql/mysqldb.py | 2 +- lib/sqlalchemy/dialects/mysql/oursql.py | 2 +- lib/sqlalchemy/dialects/mysql/pymysql.py | 2 +- lib/sqlalchemy/dialects/mysql/pyodbc.py | 2 +- lib/sqlalchemy/dialects/mysql/reflection.py | 2 +- lib/sqlalchemy/dialects/mysql/reserved_words.py | 2 +- 
lib/sqlalchemy/dialects/mysql/types.py | 2 +- lib/sqlalchemy/dialects/oracle/__init__.py | 2 +- lib/sqlalchemy/dialects/oracle/base.py | 2 +- lib/sqlalchemy/dialects/oracle/cx_oracle.py | 2 +- lib/sqlalchemy/dialects/postgresql/__init__.py | 2 +- lib/sqlalchemy/dialects/postgresql/array.py | 2 +- lib/sqlalchemy/dialects/postgresql/asyncpg.py | 2 +- lib/sqlalchemy/dialects/postgresql/base.py | 2 +- lib/sqlalchemy/dialects/postgresql/dml.py | 2 +- lib/sqlalchemy/dialects/postgresql/ext.py | 2 +- lib/sqlalchemy/dialects/postgresql/hstore.py | 2 +- lib/sqlalchemy/dialects/postgresql/json.py | 2 +- lib/sqlalchemy/dialects/postgresql/pg8000.py | 2 +- lib/sqlalchemy/dialects/postgresql/psycopg2.py | 2 +- lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py | 2 +- lib/sqlalchemy/dialects/postgresql/pygresql.py | 2 +- lib/sqlalchemy/dialects/postgresql/pypostgresql.py | 2 +- lib/sqlalchemy/dialects/postgresql/ranges.py | 2 +- lib/sqlalchemy/dialects/sqlite/__init__.py | 2 +- lib/sqlalchemy/dialects/sqlite/aiosqlite.py | 2 +- lib/sqlalchemy/dialects/sqlite/base.py | 2 +- lib/sqlalchemy/dialects/sqlite/dml.py | 2 +- lib/sqlalchemy/dialects/sqlite/pysqlcipher.py | 2 +- lib/sqlalchemy/dialects/sqlite/pysqlite.py | 2 +- lib/sqlalchemy/dialects/sybase/__init__.py | 2 +- lib/sqlalchemy/dialects/sybase/base.py | 2 +- lib/sqlalchemy/dialects/sybase/mxodbc.py | 2 +- lib/sqlalchemy/dialects/sybase/pyodbc.py | 2 +- lib/sqlalchemy/dialects/sybase/pysybase.py | 2 +- lib/sqlalchemy/engine/__init__.py | 2 +- lib/sqlalchemy/engine/base.py | 2 +- lib/sqlalchemy/engine/create.py | 2 +- lib/sqlalchemy/engine/cursor.py | 2 +- lib/sqlalchemy/engine/default.py | 2 +- lib/sqlalchemy/engine/events.py | 2 +- lib/sqlalchemy/engine/interfaces.py | 2 +- lib/sqlalchemy/engine/mock.py | 2 +- lib/sqlalchemy/engine/reflection.py | 2 +- lib/sqlalchemy/engine/result.py | 2 +- lib/sqlalchemy/engine/row.py | 2 +- lib/sqlalchemy/engine/strategies.py | 2 +- lib/sqlalchemy/engine/url.py | 2 +- 
lib/sqlalchemy/engine/util.py | 2 +- lib/sqlalchemy/event/__init__.py | 2 +- lib/sqlalchemy/event/api.py | 2 +- lib/sqlalchemy/event/attr.py | 2 +- lib/sqlalchemy/event/base.py | 2 +- lib/sqlalchemy/event/legacy.py | 2 +- lib/sqlalchemy/event/registry.py | 2 +- lib/sqlalchemy/events.py | 2 +- lib/sqlalchemy/exc.py | 2 +- lib/sqlalchemy/ext/__init__.py | 2 +- lib/sqlalchemy/ext/associationproxy.py | 2 +- lib/sqlalchemy/ext/asyncio/__init__.py | 2 +- lib/sqlalchemy/ext/asyncio/base.py | 2 +- lib/sqlalchemy/ext/asyncio/engine.py | 2 +- lib/sqlalchemy/ext/asyncio/events.py | 2 +- lib/sqlalchemy/ext/asyncio/exc.py | 2 +- lib/sqlalchemy/ext/asyncio/result.py | 2 +- lib/sqlalchemy/ext/asyncio/scoping.py | 2 +- lib/sqlalchemy/ext/asyncio/session.py | 2 +- lib/sqlalchemy/ext/automap.py | 2 +- lib/sqlalchemy/ext/baked.py | 2 +- lib/sqlalchemy/ext/compiler.py | 2 +- lib/sqlalchemy/ext/declarative/__init__.py | 2 +- lib/sqlalchemy/ext/declarative/extensions.py | 2 +- lib/sqlalchemy/ext/horizontal_shard.py | 2 +- lib/sqlalchemy/ext/hybrid.py | 2 +- lib/sqlalchemy/ext/indexable.py | 2 +- lib/sqlalchemy/ext/mutable.py | 2 +- lib/sqlalchemy/ext/orderinglist.py | 2 +- lib/sqlalchemy/ext/serializer.py | 2 +- lib/sqlalchemy/future/__init__.py | 2 +- lib/sqlalchemy/future/orm/__init__.py | 2 +- lib/sqlalchemy/inspection.py | 2 +- lib/sqlalchemy/log.py | 2 +- lib/sqlalchemy/orm/__init__.py | 2 +- lib/sqlalchemy/orm/attributes.py | 2 +- lib/sqlalchemy/orm/base.py | 2 +- lib/sqlalchemy/orm/clsregistry.py | 2 +- lib/sqlalchemy/orm/collections.py | 2 +- lib/sqlalchemy/orm/context.py | 2 +- lib/sqlalchemy/orm/decl_api.py | 2 +- lib/sqlalchemy/orm/decl_base.py | 2 +- lib/sqlalchemy/orm/dependency.py | 2 +- lib/sqlalchemy/orm/descriptor_props.py | 2 +- lib/sqlalchemy/orm/dynamic.py | 2 +- lib/sqlalchemy/orm/evaluator.py | 2 +- lib/sqlalchemy/orm/events.py | 2 +- lib/sqlalchemy/orm/exc.py | 2 +- lib/sqlalchemy/orm/identity.py | 2 +- lib/sqlalchemy/orm/instrumentation.py | 2 +- 
lib/sqlalchemy/orm/interfaces.py | 2 +- lib/sqlalchemy/orm/loading.py | 2 +- lib/sqlalchemy/orm/mapper.py | 2 +- lib/sqlalchemy/orm/path_registry.py | 2 +- lib/sqlalchemy/orm/persistence.py | 2 +- lib/sqlalchemy/orm/properties.py | 2 +- lib/sqlalchemy/orm/query.py | 2 +- lib/sqlalchemy/orm/relationships.py | 2 +- lib/sqlalchemy/orm/scoping.py | 2 +- lib/sqlalchemy/orm/session.py | 2 +- lib/sqlalchemy/orm/state.py | 2 +- lib/sqlalchemy/orm/strategies.py | 2 +- lib/sqlalchemy/orm/strategy_options.py | 2 +- lib/sqlalchemy/orm/sync.py | 2 +- lib/sqlalchemy/orm/unitofwork.py | 2 +- lib/sqlalchemy/orm/util.py | 2 +- lib/sqlalchemy/pool/__init__.py | 2 +- lib/sqlalchemy/pool/base.py | 2 +- lib/sqlalchemy/pool/dbapi_proxy.py | 2 +- lib/sqlalchemy/pool/events.py | 2 +- lib/sqlalchemy/pool/impl.py | 2 +- lib/sqlalchemy/processors.py | 2 +- lib/sqlalchemy/schema.py | 2 +- lib/sqlalchemy/sql/__init__.py | 2 +- lib/sqlalchemy/sql/annotation.py | 2 +- lib/sqlalchemy/sql/base.py | 2 +- lib/sqlalchemy/sql/coercions.py | 2 +- lib/sqlalchemy/sql/compiler.py | 2 +- lib/sqlalchemy/sql/crud.py | 2 +- lib/sqlalchemy/sql/ddl.py | 2 +- lib/sqlalchemy/sql/default_comparator.py | 2 +- lib/sqlalchemy/sql/dml.py | 2 +- lib/sqlalchemy/sql/elements.py | 2 +- lib/sqlalchemy/sql/events.py | 2 +- lib/sqlalchemy/sql/expression.py | 2 +- lib/sqlalchemy/sql/functions.py | 2 +- lib/sqlalchemy/sql/lambdas.py | 2 +- lib/sqlalchemy/sql/naming.py | 2 +- lib/sqlalchemy/sql/operators.py | 2 +- lib/sqlalchemy/sql/roles.py | 2 +- lib/sqlalchemy/sql/schema.py | 2 +- lib/sqlalchemy/sql/selectable.py | 2 +- lib/sqlalchemy/sql/sqltypes.py | 2 +- lib/sqlalchemy/sql/type_api.py | 2 +- lib/sqlalchemy/sql/util.py | 2 +- lib/sqlalchemy/sql/visitors.py | 2 +- lib/sqlalchemy/testing/__init__.py | 2 +- lib/sqlalchemy/testing/assertions.py | 2 +- lib/sqlalchemy/testing/assertsql.py | 2 +- lib/sqlalchemy/testing/asyncio.py | 2 +- lib/sqlalchemy/testing/config.py | 2 +- lib/sqlalchemy/testing/engines.py | 2 +- 
lib/sqlalchemy/testing/entities.py | 2 +- lib/sqlalchemy/testing/exclusions.py | 2 +- lib/sqlalchemy/testing/fixtures.py | 2 +- lib/sqlalchemy/testing/mock.py | 2 +- lib/sqlalchemy/testing/pickleable.py | 2 +- lib/sqlalchemy/testing/plugin/plugin_base.py | 2 +- lib/sqlalchemy/testing/profiling.py | 2 +- lib/sqlalchemy/testing/requirements.py | 2 +- lib/sqlalchemy/testing/schema.py | 2 +- lib/sqlalchemy/testing/util.py | 2 +- lib/sqlalchemy/testing/warnings.py | 2 +- lib/sqlalchemy/types.py | 2 +- lib/sqlalchemy/util/__init__.py | 2 +- lib/sqlalchemy/util/_collections.py | 2 +- lib/sqlalchemy/util/_compat_py3k.py | 2 +- lib/sqlalchemy/util/_concurrency_py3k.py | 2 +- lib/sqlalchemy/util/_preloaded.py | 2 +- lib/sqlalchemy/util/compat.py | 2 +- lib/sqlalchemy/util/concurrency.py | 2 +- lib/sqlalchemy/util/deprecations.py | 2 +- lib/sqlalchemy/util/langhelpers.py | 2 +- lib/sqlalchemy/util/queue.py | 2 +- lib/sqlalchemy/util/topological.py | 2 +- 204 files changed, 204 insertions(+), 204 deletions(-) diff --git a/LICENSE b/LICENSE index c933e4b1ae0..7bf9bbe9683 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright 2005-2022 SQLAlchemy authors and contributors . +Copyright 2005-2023 SQLAlchemy authors and contributors . Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/doc/build/conf.py b/doc/build/conf.py index 97346f6ff2d..737026ef281 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -204,7 +204,7 @@ # General information about the project. 
project = u"SQLAlchemy" -copyright = u"2007-2022, the SQLAlchemy authors and contributors" # noqa +copyright = u"2007-2023, the SQLAlchemy authors and contributors" # noqa # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/doc/build/copyright.rst b/doc/build/copyright.rst index 59df651312d..aa4abac9b1d 100644 --- a/doc/build/copyright.rst +++ b/doc/build/copyright.rst @@ -6,7 +6,7 @@ Appendix: Copyright This is the MIT license: ``_ -Copyright (c) 2005-2022 Michael Bayer and contributors. +Copyright (c) 2005-2023 Michael Bayer and contributors. SQLAlchemy is a trademark of Michael Bayer. Permission is hereby granted, free of charge, to any person obtaining a copy of this diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index d545406a156..640872ea08b 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -1,5 +1,5 @@ # sqlalchemy/__init__.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/cextension/immutabledict.c b/lib/sqlalchemy/cextension/immutabledict.c index 53e1ab34934..2dbc7381de7 100644 --- a/lib/sqlalchemy/cextension/immutabledict.c +++ b/lib/sqlalchemy/cextension/immutabledict.c @@ -1,6 +1,6 @@ /* immuatbledict.c -Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +Copyright (C) 2005-2023 the SQLAlchemy authors and contributors This module is part of SQLAlchemy and is released under the MIT License: https://www.opensource.org/licenses/mit-license.php diff --git a/lib/sqlalchemy/cextension/processors.c b/lib/sqlalchemy/cextension/processors.c index afe4234b92c..d6e7c4fe4e0 100644 --- a/lib/sqlalchemy/cextension/processors.c +++ b/lib/sqlalchemy/cextension/processors.c @@ -1,6 +1,6 @@ /* processors.c -Copyright (C) 
2010-2022 the SQLAlchemy authors and contributors +Copyright (C) 2010-2023 the SQLAlchemy authors and contributors Copyright (C) 2010-2011 Gaetan de Menten gdementen@gmail.com This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/cextension/resultproxy.c b/lib/sqlalchemy/cextension/resultproxy.c index 785ad7e807d..9d1f0ead480 100644 --- a/lib/sqlalchemy/cextension/resultproxy.c +++ b/lib/sqlalchemy/cextension/resultproxy.c @@ -1,6 +1,6 @@ /* resultproxy.c -Copyright (C) 2010-2022 the SQLAlchemy authors and contributors +Copyright (C) 2010-2023 the SQLAlchemy authors and contributors Copyright (C) 2010-2011 Gaetan de Menten gdementen@gmail.com This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/connectors/__init__.py b/lib/sqlalchemy/connectors/__init__.py index e738086e6fa..d043c44ce65 100644 --- a/lib/sqlalchemy/connectors/__init__.py +++ b/lib/sqlalchemy/connectors/__init__.py @@ -1,5 +1,5 @@ # connectors/__init__.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/connectors/mxodbc.py b/lib/sqlalchemy/connectors/mxodbc.py index 89b348433f5..8ed18dfedbf 100644 --- a/lib/sqlalchemy/connectors/mxodbc.py +++ b/lib/sqlalchemy/connectors/mxodbc.py @@ -1,5 +1,5 @@ # connectors/mxodbc.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/connectors/pyodbc.py b/lib/sqlalchemy/connectors/pyodbc.py index 9bb67b5113f..a8b9cdfae04 100644 --- a/lib/sqlalchemy/connectors/pyodbc.py +++ b/lib/sqlalchemy/connectors/pyodbc.py @@ -1,5 +1,5 @@ # connectors/pyodbc.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors 
and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/databases/__init__.py b/lib/sqlalchemy/databases/__init__.py index fa832298359..6af6d0448a2 100644 --- a/lib/sqlalchemy/databases/__init__.py +++ b/lib/sqlalchemy/databases/__init__.py @@ -1,5 +1,5 @@ # databases/__init__.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/__init__.py b/lib/sqlalchemy/dialects/__init__.py index 84a9ad81fc4..0671b4d0fb9 100644 --- a/lib/sqlalchemy/dialects/__init__.py +++ b/lib/sqlalchemy/dialects/__init__.py @@ -1,5 +1,5 @@ # dialects/__init__.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/firebird/__init__.py b/lib/sqlalchemy/dialects/firebird/__init__.py index a34eecf9def..c2e63f7f816 100644 --- a/lib/sqlalchemy/dialects/firebird/__init__.py +++ b/lib/sqlalchemy/dialects/firebird/__init__.py @@ -1,5 +1,5 @@ # firebird/__init__.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/firebird/base.py b/lib/sqlalchemy/dialects/firebird/base.py index e2698b17817..5fd24ee272a 100644 --- a/lib/sqlalchemy/dialects/firebird/base.py +++ b/lib/sqlalchemy/dialects/firebird/base.py @@ -1,5 +1,5 @@ # firebird/base.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/firebird/fdb.py 
b/lib/sqlalchemy/dialects/firebird/fdb.py index 38f443267c7..3a093b38d68 100644 --- a/lib/sqlalchemy/dialects/firebird/fdb.py +++ b/lib/sqlalchemy/dialects/firebird/fdb.py @@ -1,5 +1,5 @@ # firebird/fdb.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/firebird/kinterbasdb.py b/lib/sqlalchemy/dialects/firebird/kinterbasdb.py index b999404641f..d2a110ebd01 100644 --- a/lib/sqlalchemy/dialects/firebird/kinterbasdb.py +++ b/lib/sqlalchemy/dialects/firebird/kinterbasdb.py @@ -1,5 +1,5 @@ # firebird/kinterbasdb.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/__init__.py b/lib/sqlalchemy/dialects/mssql/__init__.py index cae01682c81..22c38eafccc 100644 --- a/lib/sqlalchemy/dialects/mssql/__init__.py +++ b/lib/sqlalchemy/dialects/mssql/__init__.py @@ -1,5 +1,5 @@ # mssql/__init__.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index ea9c90a51ec..948d3afb063 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -1,5 +1,5 @@ # mssql/base.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/information_schema.py b/lib/sqlalchemy/dialects/mssql/information_schema.py index df914936899..8ca95d79b5b 100644 --- 
a/lib/sqlalchemy/dialects/mssql/information_schema.py +++ b/lib/sqlalchemy/dialects/mssql/information_schema.py @@ -1,5 +1,5 @@ # mssql/information_schema.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/mxodbc.py b/lib/sqlalchemy/dialects/mssql/mxodbc.py index 95c32d45298..4aceb56c6f7 100644 --- a/lib/sqlalchemy/dialects/mssql/mxodbc.py +++ b/lib/sqlalchemy/dialects/mssql/mxodbc.py @@ -1,5 +1,5 @@ # mssql/mxodbc.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/pymssql.py b/lib/sqlalchemy/dialects/mssql/pymssql.py index c4ec66d1176..ddb8f1fbcbb 100644 --- a/lib/sqlalchemy/dialects/mssql/pymssql.py +++ b/lib/sqlalchemy/dialects/mssql/pymssql.py @@ -1,5 +1,5 @@ # mssql/pymssql.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/pyodbc.py b/lib/sqlalchemy/dialects/mssql/pyodbc.py index 053b7ac5482..104774d428b 100644 --- a/lib/sqlalchemy/dialects/mssql/pyodbc.py +++ b/lib/sqlalchemy/dialects/mssql/pyodbc.py @@ -1,5 +1,5 @@ # mssql/pyodbc.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/__init__.py b/lib/sqlalchemy/dialects/mysql/__init__.py index 04c83d161ee..b58e7bee989 100644 --- a/lib/sqlalchemy/dialects/mysql/__init__.py +++ b/lib/sqlalchemy/dialects/mysql/__init__.py @@ -1,5 +1,5 @@ # mysql/__init__.py -# 
Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/aiomysql.py b/lib/sqlalchemy/dialects/mysql/aiomysql.py index 975467c24f7..93e500c32bf 100644 --- a/lib/sqlalchemy/dialects/mysql/aiomysql.py +++ b/lib/sqlalchemy/dialects/mysql/aiomysql.py @@ -1,5 +1,5 @@ # mysql/aiomysql.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/asyncmy.py b/lib/sqlalchemy/dialects/mysql/asyncmy.py index 521918a5a17..a27f24bab9c 100644 --- a/lib/sqlalchemy/dialects/mysql/asyncmy.py +++ b/lib/sqlalchemy/dialects/mysql/asyncmy.py @@ -1,5 +1,5 @@ # mysql/asyncmy.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index 70b60a0a0fe..17916e40448 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -1,5 +1,5 @@ # mysql/base.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/cymysql.py b/lib/sqlalchemy/dialects/mysql/cymysql.py index a67a194a991..889fe46105b 100644 --- a/lib/sqlalchemy/dialects/mysql/cymysql.py +++ b/lib/sqlalchemy/dialects/mysql/cymysql.py @@ -1,5 +1,5 @@ # mysql/cymysql.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/enumerated.py b/lib/sqlalchemy/dialects/mysql/enumerated.py index 
6c9ef28ec16..3f5ae77f588 100644 --- a/lib/sqlalchemy/dialects/mysql/enumerated.py +++ b/lib/sqlalchemy/dialects/mysql/enumerated.py @@ -1,5 +1,5 @@ # mysql/enumerated.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/json.py b/lib/sqlalchemy/dialects/mysql/json.py index 857fccebd4a..bc9b62cc76e 100644 --- a/lib/sqlalchemy/dialects/mysql/json.py +++ b/lib/sqlalchemy/dialects/mysql/json.py @@ -1,5 +1,5 @@ # mysql/json.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/mariadbconnector.py b/lib/sqlalchemy/dialects/mysql/mariadbconnector.py index c8b2eada6dc..01c83b06a90 100644 --- a/lib/sqlalchemy/dialects/mysql/mariadbconnector.py +++ b/lib/sqlalchemy/dialects/mysql/mariadbconnector.py @@ -1,5 +1,5 @@ # mysql/mariadbconnector.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/mysqlconnector.py b/lib/sqlalchemy/dialects/mysql/mysqlconnector.py index 356babe70fd..bb173b335b1 100644 --- a/lib/sqlalchemy/dialects/mysql/mysqlconnector.py +++ b/lib/sqlalchemy/dialects/mysql/mysqlconnector.py @@ -1,5 +1,5 @@ # mysql/mysqlconnector.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/mysqldb.py b/lib/sqlalchemy/dialects/mysql/mysqldb.py index 76d9b67fd66..9e31f4ba2af 100644 --- a/lib/sqlalchemy/dialects/mysql/mysqldb.py +++ 
b/lib/sqlalchemy/dialects/mysql/mysqldb.py @@ -1,5 +1,5 @@ # mysql/mysqldb.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/oursql.py b/lib/sqlalchemy/dialects/mysql/oursql.py index f6287dc7aea..603ee18e9ba 100644 --- a/lib/sqlalchemy/dialects/mysql/oursql.py +++ b/lib/sqlalchemy/dialects/mysql/oursql.py @@ -1,5 +1,5 @@ # mysql/oursql.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/pymysql.py b/lib/sqlalchemy/dialects/mysql/pymysql.py index f6201333215..5e234fbec1f 100644 --- a/lib/sqlalchemy/dialects/mysql/pymysql.py +++ b/lib/sqlalchemy/dialects/mysql/pymysql.py @@ -1,5 +1,5 @@ # mysql/pymysql.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/pyodbc.py b/lib/sqlalchemy/dialects/mysql/pyodbc.py index bfa61f6480f..9703b82e2f6 100644 --- a/lib/sqlalchemy/dialects/mysql/pyodbc.py +++ b/lib/sqlalchemy/dialects/mysql/pyodbc.py @@ -1,5 +1,5 @@ # mysql/pyodbc.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/reflection.py b/lib/sqlalchemy/dialects/mysql/reflection.py index f536496d469..fae9a387e55 100644 --- a/lib/sqlalchemy/dialects/mysql/reflection.py +++ b/lib/sqlalchemy/dialects/mysql/reflection.py @@ -1,5 +1,5 @@ # mysql/reflection.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# 
Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/reserved_words.py b/lib/sqlalchemy/dialects/mysql/reserved_words.py index 995168bbb09..8a9198e5fac 100644 --- a/lib/sqlalchemy/dialects/mysql/reserved_words.py +++ b/lib/sqlalchemy/dialects/mysql/reserved_words.py @@ -1,5 +1,5 @@ # mysql/reserved_words.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/types.py b/lib/sqlalchemy/dialects/mysql/types.py index b81ee95ac1d..b7ba17772ab 100644 --- a/lib/sqlalchemy/dialects/mysql/types.py +++ b/lib/sqlalchemy/dialects/mysql/types.py @@ -1,5 +1,5 @@ # mysql/types.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/oracle/__init__.py b/lib/sqlalchemy/dialects/oracle/__init__.py index c83e0573d68..7ad817f29ba 100644 --- a/lib/sqlalchemy/dialects/oracle/__init__.py +++ b/lib/sqlalchemy/dialects/oracle/__init__.py @@ -1,5 +1,5 @@ # oracle/__init__.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py index 934d4c719e2..c1736a1f9fd 100644 --- a/lib/sqlalchemy/dialects/oracle/base.py +++ b/lib/sqlalchemy/dialects/oracle/base.py @@ -1,5 +1,5 @@ # oracle/base.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff 
--git a/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/lib/sqlalchemy/dialects/oracle/cx_oracle.py index acdf4ded28a..0f4befe415a 100644 --- a/lib/sqlalchemy/dialects/oracle/cx_oracle.py +++ b/lib/sqlalchemy/dialects/oracle/cx_oracle.py @@ -1,4 +1,4 @@ -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/__init__.py b/lib/sqlalchemy/dialects/postgresql/__init__.py index 262e160d8d0..7fb791edb39 100644 --- a/lib/sqlalchemy/dialects/postgresql/__init__.py +++ b/lib/sqlalchemy/dialects/postgresql/__init__.py @@ -1,5 +1,5 @@ # postgresql/__init__.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/array.py b/lib/sqlalchemy/dialects/postgresql/array.py index 2f915c975ea..9f8cc39027a 100644 --- a/lib/sqlalchemy/dialects/postgresql/array.py +++ b/lib/sqlalchemy/dialects/postgresql/array.py @@ -1,5 +1,5 @@ # postgresql/array.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/asyncpg.py b/lib/sqlalchemy/dialects/postgresql/asyncpg.py index 39b0f544cb4..f9f7e34b548 100644 --- a/lib/sqlalchemy/dialects/postgresql/asyncpg.py +++ b/lib/sqlalchemy/dialects/postgresql/asyncpg.py @@ -1,5 +1,5 @@ # postgresql/asyncpg.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index aceec887e17..61e9645626b 100644 --- 
a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -1,5 +1,5 @@ # postgresql/base.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/dml.py b/lib/sqlalchemy/dialects/postgresql/dml.py index e7b126b3eb5..b8d6d66729c 100644 --- a/lib/sqlalchemy/dialects/postgresql/dml.py +++ b/lib/sqlalchemy/dialects/postgresql/dml.py @@ -1,5 +1,5 @@ # postgresql/on_conflict.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/ext.py b/lib/sqlalchemy/dialects/postgresql/ext.py index e6b992e88a9..eb485fe7925 100644 --- a/lib/sqlalchemy/dialects/postgresql/ext.py +++ b/lib/sqlalchemy/dialects/postgresql/ext.py @@ -1,5 +1,5 @@ # postgresql/ext.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/hstore.py b/lib/sqlalchemy/dialects/postgresql/hstore.py index 3859395a86e..65b93753da0 100644 --- a/lib/sqlalchemy/dialects/postgresql/hstore.py +++ b/lib/sqlalchemy/dialects/postgresql/hstore.py @@ -1,5 +1,5 @@ # postgresql/hstore.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/json.py b/lib/sqlalchemy/dialects/postgresql/json.py index daaaeacc121..534d8711180 100644 --- a/lib/sqlalchemy/dialects/postgresql/json.py +++ b/lib/sqlalchemy/dialects/postgresql/json.py @@ -1,5 +1,5 @@ # 
postgresql/json.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/pg8000.py b/lib/sqlalchemy/dialects/postgresql/pg8000.py index 98561a9b99b..6a3e67adc2a 100644 --- a/lib/sqlalchemy/dialects/postgresql/pg8000.py +++ b/lib/sqlalchemy/dialects/postgresql/pg8000.py @@ -1,5 +1,5 @@ # postgresql/pg8000.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py index 67474271e8e..d26b649f789 100644 --- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py +++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py @@ -1,5 +1,5 @@ # postgresql/psycopg2.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py b/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py index 10d1aae5d28..a169dd9c777 100644 --- a/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py +++ b/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py @@ -1,5 +1,5 @@ # testing/engines.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/pygresql.py b/lib/sqlalchemy/dialects/postgresql/pygresql.py index d273b8c5be0..6bae6a4f11f 100644 --- a/lib/sqlalchemy/dialects/postgresql/pygresql.py +++ b/lib/sqlalchemy/dialects/postgresql/pygresql.py @@ -1,5 +1,5 @@ # postgresql/pygresql.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 
2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/pypostgresql.py b/lib/sqlalchemy/dialects/postgresql/pypostgresql.py index 886e368c5a2..aa5a0e0d7d8 100644 --- a/lib/sqlalchemy/dialects/postgresql/pypostgresql.py +++ b/lib/sqlalchemy/dialects/postgresql/pypostgresql.py @@ -1,5 +1,5 @@ # postgresql/pypostgresql.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/ranges.py b/lib/sqlalchemy/dialects/postgresql/ranges.py index 51f3b0489f8..667b986c32d 100644 --- a/lib/sqlalchemy/dialects/postgresql/ranges.py +++ b/lib/sqlalchemy/dialects/postgresql/ranges.py @@ -1,4 +1,4 @@ -# Copyright (C) 2013-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2013-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/__init__.py b/lib/sqlalchemy/dialects/sqlite/__init__.py index 8d8d933b912..dba10f89f54 100644 --- a/lib/sqlalchemy/dialects/sqlite/__init__.py +++ b/lib/sqlalchemy/dialects/sqlite/__init__.py @@ -1,5 +1,5 @@ # sqlite/__init__.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/aiosqlite.py b/lib/sqlalchemy/dialects/sqlite/aiosqlite.py index 04adabfb6a1..b5ec6e27d94 100644 --- a/lib/sqlalchemy/dialects/sqlite/aiosqlite.py +++ b/lib/sqlalchemy/dialects/sqlite/aiosqlite.py @@ -1,5 +1,5 @@ # sqlite/aiosqlite.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of 
SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index 56294f4bd54..61a0a97df19 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -1,5 +1,5 @@ # sqlite/base.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/dml.py b/lib/sqlalchemy/dialects/sqlite/dml.py index b04a5e6eb62..b78caf277ec 100644 --- a/lib/sqlalchemy/dialects/sqlite/dml.py +++ b/lib/sqlalchemy/dialects/sqlite/dml.py @@ -1,4 +1,4 @@ -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py b/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py index 65f94c81353..d99113f3011 100644 --- a/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py +++ b/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py @@ -1,5 +1,5 @@ # sqlite/pysqlcipher.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/pysqlite.py b/lib/sqlalchemy/dialects/sqlite/pysqlite.py index d9fa9413e78..b10c17de2a6 100644 --- a/lib/sqlalchemy/dialects/sqlite/pysqlite.py +++ b/lib/sqlalchemy/dialects/sqlite/pysqlite.py @@ -1,5 +1,5 @@ # sqlite/pysqlite.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sybase/__init__.py b/lib/sqlalchemy/dialects/sybase/__init__.py index c7755c8e767..92cda9de132 
100644 --- a/lib/sqlalchemy/dialects/sybase/__init__.py +++ b/lib/sqlalchemy/dialects/sybase/__init__.py @@ -1,5 +1,5 @@ # sybase/__init__.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sybase/base.py b/lib/sqlalchemy/dialects/sybase/base.py index 83248d10c63..f2056a22ced 100644 --- a/lib/sqlalchemy/dialects/sybase/base.py +++ b/lib/sqlalchemy/dialects/sybase/base.py @@ -1,5 +1,5 @@ # sybase/base.py -# Copyright (C) 2010-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2010-2023 the SQLAlchemy authors and contributors # # get_select_precolumns(), limit_clause() implementation # copyright (C) 2007 Fisch Asset Management diff --git a/lib/sqlalchemy/dialects/sybase/mxodbc.py b/lib/sqlalchemy/dialects/sybase/mxodbc.py index fe5a61460fb..deae27e3eac 100644 --- a/lib/sqlalchemy/dialects/sybase/mxodbc.py +++ b/lib/sqlalchemy/dialects/sybase/mxodbc.py @@ -1,5 +1,5 @@ # sybase/mxodbc.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sybase/pyodbc.py b/lib/sqlalchemy/dialects/sybase/pyodbc.py index f408e8f9c36..43a48fe3a57 100644 --- a/lib/sqlalchemy/dialects/sybase/pyodbc.py +++ b/lib/sqlalchemy/dialects/sybase/pyodbc.py @@ -1,5 +1,5 @@ # sybase/pyodbc.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sybase/pysybase.py b/lib/sqlalchemy/dialects/sybase/pysybase.py index 4c96aacd78b..2778f598638 100644 --- a/lib/sqlalchemy/dialects/sybase/pysybase.py +++ b/lib/sqlalchemy/dialects/sybase/pysybase.py @@ -1,5 +1,5 
@@ # sybase/pysybase.py -# Copyright (C) 2010-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2010-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/__init__.py b/lib/sqlalchemy/engine/__init__.py index 2437e170dfa..ec699b0ec46 100644 --- a/lib/sqlalchemy/engine/__init__.py +++ b/lib/sqlalchemy/engine/__init__.py @@ -1,5 +1,5 @@ # engine/__init__.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index 00e1be77669..d08e3eb7d0d 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -1,5 +1,5 @@ # engine/base.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/create.py b/lib/sqlalchemy/engine/create.py index 8c929ccc4ab..2e1219b442c 100644 --- a/lib/sqlalchemy/engine/create.py +++ b/lib/sqlalchemy/engine/create.py @@ -1,5 +1,5 @@ # engine/create.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/cursor.py b/lib/sqlalchemy/engine/cursor.py index 168e08d1114..02f6d5a0ac1 100644 --- a/lib/sqlalchemy/engine/cursor.py +++ b/lib/sqlalchemy/engine/cursor.py @@ -1,5 +1,5 @@ # engine/cursor.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/default.py b/lib/sqlalchemy/engine/default.py 
index e050bea7a7f..35b6c31ce21 100644 --- a/lib/sqlalchemy/engine/default.py +++ b/lib/sqlalchemy/engine/default.py @@ -1,5 +1,5 @@ # engine/default.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/events.py b/lib/sqlalchemy/engine/events.py index 286c4d48773..7343582317d 100644 --- a/lib/sqlalchemy/engine/events.py +++ b/lib/sqlalchemy/engine/events.py @@ -1,5 +1,5 @@ # sqlalchemy/engine/events.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/interfaces.py b/lib/sqlalchemy/engine/interfaces.py index 4e0ab8e72fd..de78ad920ae 100644 --- a/lib/sqlalchemy/engine/interfaces.py +++ b/lib/sqlalchemy/engine/interfaces.py @@ -1,5 +1,5 @@ # engine/interfaces.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/mock.py b/lib/sqlalchemy/engine/mock.py index 6fcb09f1b7d..b9ef04097cb 100644 --- a/lib/sqlalchemy/engine/mock.py +++ b/lib/sqlalchemy/engine/mock.py @@ -1,5 +1,5 @@ # engine/mock.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/reflection.py b/lib/sqlalchemy/engine/reflection.py index b475228c82d..64b4bba6a74 100644 --- a/lib/sqlalchemy/engine/reflection.py +++ b/lib/sqlalchemy/engine/reflection.py @@ -1,5 +1,5 @@ # engine/reflection.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy 
authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py index add74f3edf7..116245670b3 100644 --- a/lib/sqlalchemy/engine/result.py +++ b/lib/sqlalchemy/engine/result.py @@ -1,5 +1,5 @@ # engine/result.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/row.py b/lib/sqlalchemy/engine/row.py index e80e8c6bec9..f7c00bab37f 100644 --- a/lib/sqlalchemy/engine/row.py +++ b/lib/sqlalchemy/engine/row.py @@ -1,5 +1,5 @@ # engine/row.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/strategies.py b/lib/sqlalchemy/engine/strategies.py index 54a5e51c160..335ec45899d 100644 --- a/lib/sqlalchemy/engine/strategies.py +++ b/lib/sqlalchemy/engine/strategies.py @@ -1,5 +1,5 @@ # engine/strategies.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/url.py b/lib/sqlalchemy/engine/url.py index 5b54d40d0ac..2defd37aae2 100644 --- a/lib/sqlalchemy/engine/url.py +++ b/lib/sqlalchemy/engine/url.py @@ -1,5 +1,5 @@ # engine/url.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/util.py b/lib/sqlalchemy/engine/util.py index 1b03ebbf0ad..be1850dba61 100644 --- a/lib/sqlalchemy/engine/util.py +++ b/lib/sqlalchemy/engine/util.py @@ -1,5 +1,5 @@ # engine/util.py -# 
Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/__init__.py b/lib/sqlalchemy/event/__init__.py index a89bea894e2..cd5540b0d26 100644 --- a/lib/sqlalchemy/event/__init__.py +++ b/lib/sqlalchemy/event/__init__.py @@ -1,5 +1,5 @@ # event/__init__.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/api.py b/lib/sqlalchemy/event/api.py index ce44f571ba5..440d5d1bd38 100644 --- a/lib/sqlalchemy/event/api.py +++ b/lib/sqlalchemy/event/api.py @@ -1,5 +1,5 @@ # event/api.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/attr.py b/lib/sqlalchemy/event/attr.py index 09b5a2267f0..5a85cb91ee2 100644 --- a/lib/sqlalchemy/event/attr.py +++ b/lib/sqlalchemy/event/attr.py @@ -1,5 +1,5 @@ # event/attr.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/base.py b/lib/sqlalchemy/event/base.py index 510e16bddfe..57d481dbd9c 100644 --- a/lib/sqlalchemy/event/base.py +++ b/lib/sqlalchemy/event/base.py @@ -1,5 +1,5 @@ # event/base.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/legacy.py b/lib/sqlalchemy/event/legacy.py index 686e4c5bf5d..d2f1fda13ef 100644 --- a/lib/sqlalchemy/event/legacy.py +++ 
b/lib/sqlalchemy/event/legacy.py @@ -1,5 +1,5 @@ # event/legacy.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/registry.py b/lib/sqlalchemy/event/registry.py index ac143c44d33..6bc38a3191e 100644 --- a/lib/sqlalchemy/event/registry.py +++ b/lib/sqlalchemy/event/registry.py @@ -1,5 +1,5 @@ # event/registry.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/events.py b/lib/sqlalchemy/events.py index d17b0b12f59..3a844647fd8 100644 --- a/lib/sqlalchemy/events.py +++ b/lib/sqlalchemy/events.py @@ -1,5 +1,5 @@ # sqlalchemy/events.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/exc.py b/lib/sqlalchemy/exc.py index 78bcef3a02f..20afe55bc81 100644 --- a/lib/sqlalchemy/exc.py +++ b/lib/sqlalchemy/exc.py @@ -1,5 +1,5 @@ # sqlalchemy/exc.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/__init__.py b/lib/sqlalchemy/ext/__init__.py index 62bbbf3ceb2..e3af738b7ce 100644 --- a/lib/sqlalchemy/ext/__init__.py +++ b/lib/sqlalchemy/ext/__init__.py @@ -1,5 +1,5 @@ # ext/__init__.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/associationproxy.py b/lib/sqlalchemy/ext/associationproxy.py index 
fbf377afd44..72c9afde675 100644 --- a/lib/sqlalchemy/ext/associationproxy.py +++ b/lib/sqlalchemy/ext/associationproxy.py @@ -1,5 +1,5 @@ # ext/associationproxy.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/__init__.py b/lib/sqlalchemy/ext/asyncio/__init__.py index 15b2cb015b7..11539f4e6d1 100644 --- a/lib/sqlalchemy/ext/asyncio/__init__.py +++ b/lib/sqlalchemy/ext/asyncio/__init__.py @@ -1,5 +1,5 @@ # ext/asyncio/__init__.py -# Copyright (C) 2020-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/base.py b/lib/sqlalchemy/ext/asyncio/base.py index ae100ecf5f0..bdff1019bd7 100644 --- a/lib/sqlalchemy/ext/asyncio/base.py +++ b/lib/sqlalchemy/ext/asyncio/base.py @@ -1,5 +1,5 @@ # ext/asyncio/base.py -# Copyright (C) 2020-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/engine.py b/lib/sqlalchemy/ext/asyncio/engine.py index 94e54dc65e8..bc4956be9c4 100644 --- a/lib/sqlalchemy/ext/asyncio/engine.py +++ b/lib/sqlalchemy/ext/asyncio/engine.py @@ -1,5 +1,5 @@ # ext/asyncio/engine.py -# Copyright (C) 2020-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/events.py b/lib/sqlalchemy/ext/asyncio/events.py index c5d5e0126e9..f425922db74 100644 --- a/lib/sqlalchemy/ext/asyncio/events.py +++ b/lib/sqlalchemy/ext/asyncio/events.py @@ -1,5 +1,5 @@ # ext/asyncio/events.py -# Copyright (C) 2020-2022 the 
SQLAlchemy authors and contributors +# Copyright (C) 2020-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/exc.py b/lib/sqlalchemy/ext/asyncio/exc.py index cf0d9a85daf..3f937679b93 100644 --- a/lib/sqlalchemy/ext/asyncio/exc.py +++ b/lib/sqlalchemy/ext/asyncio/exc.py @@ -1,5 +1,5 @@ # ext/asyncio/exc.py -# Copyright (C) 2020-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/result.py b/lib/sqlalchemy/ext/asyncio/result.py index 15553948ba6..31c844698ab 100644 --- a/lib/sqlalchemy/ext/asyncio/result.py +++ b/lib/sqlalchemy/ext/asyncio/result.py @@ -1,5 +1,5 @@ # ext/asyncio/result.py -# Copyright (C) 2020-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/scoping.py b/lib/sqlalchemy/ext/asyncio/scoping.py index 8eca8c52480..4a7d80987c9 100644 --- a/lib/sqlalchemy/ext/asyncio/scoping.py +++ b/lib/sqlalchemy/ext/asyncio/scoping.py @@ -1,5 +1,5 @@ # ext/asyncio/scoping.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/session.py b/lib/sqlalchemy/ext/asyncio/session.py index d167ec0e980..59beb237059 100644 --- a/lib/sqlalchemy/ext/asyncio/session.py +++ b/lib/sqlalchemy/ext/asyncio/session.py @@ -1,5 +1,5 @@ # ext/asyncio/session.py -# Copyright (C) 2020-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/automap.py 
b/lib/sqlalchemy/ext/automap.py index a5d7267c211..6990a12effa 100644 --- a/lib/sqlalchemy/ext/automap.py +++ b/lib/sqlalchemy/ext/automap.py @@ -1,5 +1,5 @@ # ext/automap.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/baked.py b/lib/sqlalchemy/ext/baked.py index 109e0c0c3e5..871ead2af24 100644 --- a/lib/sqlalchemy/ext/baked.py +++ b/lib/sqlalchemy/ext/baked.py @@ -1,5 +1,5 @@ # sqlalchemy/ext/baked.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/compiler.py b/lib/sqlalchemy/ext/compiler.py index 76b59ea6e37..79608b9c825 100644 --- a/lib/sqlalchemy/ext/compiler.py +++ b/lib/sqlalchemy/ext/compiler.py @@ -1,5 +1,5 @@ # ext/compiler.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/declarative/__init__.py b/lib/sqlalchemy/ext/declarative/__init__.py index 6215e35d83d..7f435bcae1b 100644 --- a/lib/sqlalchemy/ext/declarative/__init__.py +++ b/lib/sqlalchemy/ext/declarative/__init__.py @@ -1,5 +1,5 @@ # ext/declarative/__init__.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/declarative/extensions.py b/lib/sqlalchemy/ext/declarative/extensions.py index 4f60d8bf282..ca425501fd3 100644 --- a/lib/sqlalchemy/ext/declarative/extensions.py +++ b/lib/sqlalchemy/ext/declarative/extensions.py @@ -1,5 +1,5 @@ # ext/declarative/extensions.py -# Copyright (C) 
2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/horizontal_shard.py b/lib/sqlalchemy/ext/horizontal_shard.py index bad076e35b0..9a6963127e2 100644 --- a/lib/sqlalchemy/ext/horizontal_shard.py +++ b/lib/sqlalchemy/ext/horizontal_shard.py @@ -1,5 +1,5 @@ # ext/horizontal_shard.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/hybrid.py b/lib/sqlalchemy/ext/hybrid.py index cc0aca6ca32..ab87d454db0 100644 --- a/lib/sqlalchemy/ext/hybrid.py +++ b/lib/sqlalchemy/ext/hybrid.py @@ -1,5 +1,5 @@ # ext/hybrid.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/indexable.py b/lib/sqlalchemy/ext/indexable.py index 7cbac542b15..4ca8ac54873 100644 --- a/lib/sqlalchemy/ext/indexable.py +++ b/lib/sqlalchemy/ext/indexable.py @@ -1,5 +1,5 @@ # ext/index.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/mutable.py b/lib/sqlalchemy/ext/mutable.py index 45c96178a65..0eac660488f 100644 --- a/lib/sqlalchemy/ext/mutable.py +++ b/lib/sqlalchemy/ext/mutable.py @@ -1,5 +1,5 @@ # ext/mutable.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/orderinglist.py b/lib/sqlalchemy/ext/orderinglist.py index 5a327d1a522..b13576b5da2 
100644 --- a/lib/sqlalchemy/ext/orderinglist.py +++ b/lib/sqlalchemy/ext/orderinglist.py @@ -1,5 +1,5 @@ # ext/orderinglist.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/serializer.py b/lib/sqlalchemy/ext/serializer.py index 2b2ee79779b..4d66723361c 100644 --- a/lib/sqlalchemy/ext/serializer.py +++ b/lib/sqlalchemy/ext/serializer.py @@ -1,5 +1,5 @@ # ext/serializer.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/future/__init__.py b/lib/sqlalchemy/future/__init__.py index a2bed07f1ba..1472c8226a4 100644 --- a/lib/sqlalchemy/future/__init__.py +++ b/lib/sqlalchemy/future/__init__.py @@ -1,5 +1,5 @@ # sql/future/__init__.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/future/orm/__init__.py b/lib/sqlalchemy/future/orm/__init__.py index 629631b3e0a..674dd448cdc 100644 --- a/lib/sqlalchemy/future/orm/__init__.py +++ b/lib/sqlalchemy/future/orm/__init__.py @@ -1,5 +1,5 @@ # sql/future/orm/__init__.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/inspection.py b/lib/sqlalchemy/inspection.py index 7f9822d02e9..1f377b46abc 100644 --- a/lib/sqlalchemy/inspection.py +++ b/lib/sqlalchemy/inspection.py @@ -1,5 +1,5 @@ # sqlalchemy/inspect.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and 
contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/log.py b/lib/sqlalchemy/log.py index cc662ecf996..7f90aeb32f3 100644 --- a/lib/sqlalchemy/log.py +++ b/lib/sqlalchemy/log.py @@ -1,5 +1,5 @@ # sqlalchemy/log.py -# Copyright (C) 2006-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2006-2023 the SQLAlchemy authors and contributors # # Includes alterations by Vinay Sajip vinay_sajip@yahoo.co.uk # diff --git a/lib/sqlalchemy/orm/__init__.py b/lib/sqlalchemy/orm/__init__.py index 6e0de05c6d3..3e01c6506b3 100644 --- a/lib/sqlalchemy/orm/__init__.py +++ b/lib/sqlalchemy/orm/__init__.py @@ -1,5 +1,5 @@ # orm/__init__.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/attributes.py b/lib/sqlalchemy/orm/attributes.py index 37c7d70235c..c6be3e6d0cf 100644 --- a/lib/sqlalchemy/orm/attributes.py +++ b/lib/sqlalchemy/orm/attributes.py @@ -1,5 +1,5 @@ # orm/attributes.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/base.py b/lib/sqlalchemy/orm/base.py index c2f87b54a1a..2cfe022b808 100644 --- a/lib/sqlalchemy/orm/base.py +++ b/lib/sqlalchemy/orm/base.py @@ -1,5 +1,5 @@ # orm/base.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/clsregistry.py b/lib/sqlalchemy/orm/clsregistry.py index 2c21498d851..fda5d111028 100644 --- a/lib/sqlalchemy/orm/clsregistry.py +++ b/lib/sqlalchemy/orm/clsregistry.py @@ -1,5 +1,5 @@ # ext/declarative/clsregistry.py -# Copyright (C) 2005-2022 the 
SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/collections.py b/lib/sqlalchemy/orm/collections.py index a189f02dabe..bb2aed4f788 100644 --- a/lib/sqlalchemy/orm/collections.py +++ b/lib/sqlalchemy/orm/collections.py @@ -1,5 +1,5 @@ # orm/collections.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/context.py b/lib/sqlalchemy/orm/context.py index 62c553d0bfe..a254b08bd71 100644 --- a/lib/sqlalchemy/orm/context.py +++ b/lib/sqlalchemy/orm/context.py @@ -1,5 +1,5 @@ # orm/context.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/decl_api.py b/lib/sqlalchemy/orm/decl_api.py index 16f91c69ddb..3a9fae8b1c0 100644 --- a/lib/sqlalchemy/orm/decl_api.py +++ b/lib/sqlalchemy/orm/decl_api.py @@ -1,5 +1,5 @@ # ext/declarative/api.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/decl_base.py b/lib/sqlalchemy/orm/decl_base.py index 6e1c79745fa..dd2c38d4583 100644 --- a/lib/sqlalchemy/orm/decl_base.py +++ b/lib/sqlalchemy/orm/decl_base.py @@ -1,5 +1,5 @@ # ext/declarative/base.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/dependency.py b/lib/sqlalchemy/orm/dependency.py index 1b5be9a7ec7..911c3edf43b 100644 --- 
a/lib/sqlalchemy/orm/dependency.py +++ b/lib/sqlalchemy/orm/dependency.py @@ -1,5 +1,5 @@ # orm/dependency.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/descriptor_props.py b/lib/sqlalchemy/orm/descriptor_props.py index 3d7f23be1ca..c7e2d984f8a 100644 --- a/lib/sqlalchemy/orm/descriptor_props.py +++ b/lib/sqlalchemy/orm/descriptor_props.py @@ -1,5 +1,5 @@ # orm/descriptor_props.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/dynamic.py b/lib/sqlalchemy/orm/dynamic.py index ec625601178..e19701b02eb 100644 --- a/lib/sqlalchemy/orm/dynamic.py +++ b/lib/sqlalchemy/orm/dynamic.py @@ -1,5 +1,5 @@ # orm/dynamic.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/evaluator.py b/lib/sqlalchemy/orm/evaluator.py index f1d9ca5413d..670ab28479e 100644 --- a/lib/sqlalchemy/orm/evaluator.py +++ b/lib/sqlalchemy/orm/evaluator.py @@ -1,5 +1,5 @@ # orm/evaluator.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/events.py b/lib/sqlalchemy/orm/events.py index adff448f504..19630ef2820 100644 --- a/lib/sqlalchemy/orm/events.py +++ b/lib/sqlalchemy/orm/events.py @@ -1,5 +1,5 @@ # orm/events.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is 
released under diff --git a/lib/sqlalchemy/orm/exc.py b/lib/sqlalchemy/orm/exc.py index 8dd4d90d686..0c01d154195 100644 --- a/lib/sqlalchemy/orm/exc.py +++ b/lib/sqlalchemy/orm/exc.py @@ -1,5 +1,5 @@ # orm/exc.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/identity.py b/lib/sqlalchemy/orm/identity.py index 7de8e2cdec6..695b05064f6 100644 --- a/lib/sqlalchemy/orm/identity.py +++ b/lib/sqlalchemy/orm/identity.py @@ -1,5 +1,5 @@ # orm/identity.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/instrumentation.py b/lib/sqlalchemy/orm/instrumentation.py index a7023a21d98..a8a06f254ea 100644 --- a/lib/sqlalchemy/orm/instrumentation.py +++ b/lib/sqlalchemy/orm/instrumentation.py @@ -1,5 +1,5 @@ # orm/instrumentation.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/interfaces.py b/lib/sqlalchemy/orm/interfaces.py index 7e86326cc48..51d642d0866 100644 --- a/lib/sqlalchemy/orm/interfaces.py +++ b/lib/sqlalchemy/orm/interfaces.py @@ -1,5 +1,5 @@ # orm/interfaces.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/loading.py b/lib/sqlalchemy/orm/loading.py index b5691c0c978..70fb731453c 100644 --- a/lib/sqlalchemy/orm/loading.py +++ b/lib/sqlalchemy/orm/loading.py @@ -1,5 +1,5 @@ # orm/loading.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# 
Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index 48f9031430f..d74d708437e 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -1,5 +1,5 @@ # orm/mapper.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/path_registry.py b/lib/sqlalchemy/orm/path_registry.py index 331ddd7dc54..4deb96b1f63 100644 --- a/lib/sqlalchemy/orm/path_registry.py +++ b/lib/sqlalchemy/orm/path_registry.py @@ -1,5 +1,5 @@ # orm/path_registry.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index a17b24ab5ee..b473022528a 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ b/lib/sqlalchemy/orm/persistence.py @@ -1,5 +1,5 @@ # orm/persistence.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/properties.py b/lib/sqlalchemy/orm/properties.py index 19a18173f78..e485e465da3 100644 --- a/lib/sqlalchemy/orm/properties.py +++ b/lib/sqlalchemy/orm/properties.py @@ -1,5 +1,5 @@ # orm/properties.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index cef98201f3d..8dd988ef38c 100644 --- a/lib/sqlalchemy/orm/query.py +++ 
b/lib/sqlalchemy/orm/query.py @@ -1,5 +1,5 @@ # orm/query.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/relationships.py b/lib/sqlalchemy/orm/relationships.py index 4e3664a0cb9..c85323a2b49 100644 --- a/lib/sqlalchemy/orm/relationships.py +++ b/lib/sqlalchemy/orm/relationships.py @@ -1,5 +1,5 @@ # orm/relationships.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/scoping.py b/lib/sqlalchemy/orm/scoping.py index f3232334c74..8631654921a 100644 --- a/lib/sqlalchemy/orm/scoping.py +++ b/lib/sqlalchemy/orm/scoping.py @@ -1,5 +1,5 @@ # orm/scoping.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index 7d32362b50e..2b90269b2ad 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -1,5 +1,5 @@ # orm/session.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/state.py b/lib/sqlalchemy/orm/state.py index b4e7076a4a8..2e0b06f5277 100644 --- a/lib/sqlalchemy/orm/state.py +++ b/lib/sqlalchemy/orm/state.py @@ -1,5 +1,5 @@ # orm/state.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/strategies.py 
b/lib/sqlalchemy/orm/strategies.py index 2b094214117..770320794d0 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -1,5 +1,5 @@ # orm/strategies.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/strategy_options.py b/lib/sqlalchemy/orm/strategy_options.py index ce67286ee0d..a4fe6f90f83 100644 --- a/lib/sqlalchemy/orm/strategy_options.py +++ b/lib/sqlalchemy/orm/strategy_options.py @@ -1,4 +1,4 @@ -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/sync.py b/lib/sqlalchemy/orm/sync.py index c0418045d54..f92c80fb2a7 100644 --- a/lib/sqlalchemy/orm/sync.py +++ b/lib/sqlalchemy/orm/sync.py @@ -1,5 +1,5 @@ # orm/sync.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/unitofwork.py b/lib/sqlalchemy/orm/unitofwork.py index 22576372d4d..a3854dd3cbb 100644 --- a/lib/sqlalchemy/orm/unitofwork.py +++ b/lib/sqlalchemy/orm/unitofwork.py @@ -1,5 +1,5 @@ # orm/unitofwork.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index 1a9699a0e07..e8fa0731e2e 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -1,5 +1,5 @@ # orm/util.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module 
is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/pool/__init__.py b/lib/sqlalchemy/pool/__init__.py index 6a00ef85088..1a35b1b86c5 100644 --- a/lib/sqlalchemy/pool/__init__.py +++ b/lib/sqlalchemy/pool/__init__.py @@ -1,5 +1,5 @@ # sqlalchemy/pool/__init__.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/pool/base.py b/lib/sqlalchemy/pool/base.py index dbffd54b857..9bcbc7ac2e6 100644 --- a/lib/sqlalchemy/pool/base.py +++ b/lib/sqlalchemy/pool/base.py @@ -1,5 +1,5 @@ # sqlalchemy/pool.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/pool/dbapi_proxy.py b/lib/sqlalchemy/pool/dbapi_proxy.py index b0c40f2ab7d..8560bb7639b 100644 --- a/lib/sqlalchemy/pool/dbapi_proxy.py +++ b/lib/sqlalchemy/pool/dbapi_proxy.py @@ -1,5 +1,5 @@ # sqlalchemy/pool/dbapi_proxy.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/pool/events.py b/lib/sqlalchemy/pool/events.py index f0f97832bf1..898223c31f8 100644 --- a/lib/sqlalchemy/pool/events.py +++ b/lib/sqlalchemy/pool/events.py @@ -1,5 +1,5 @@ # sqlalchemy/pool/events.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/pool/impl.py b/lib/sqlalchemy/pool/impl.py index 91d02909556..0004b3fb291 100644 --- a/lib/sqlalchemy/pool/impl.py +++ b/lib/sqlalchemy/pool/impl.py @@ -1,5 +1,5 @@ # sqlalchemy/pool.py -# Copyright (C) 
2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/processors.py b/lib/sqlalchemy/processors.py index e7f388fc6da..6cbbb56a846 100644 --- a/lib/sqlalchemy/processors.py +++ b/lib/sqlalchemy/processors.py @@ -1,5 +1,5 @@ # sqlalchemy/processors.py -# Copyright (C) 2010-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2010-2023 the SQLAlchemy authors and contributors # # Copyright (C) 2010 Gaetan de Menten gdementen@gmail.com # diff --git a/lib/sqlalchemy/schema.py b/lib/sqlalchemy/schema.py index 61f82bba019..bbe7ef09eb1 100644 --- a/lib/sqlalchemy/schema.py +++ b/lib/sqlalchemy/schema.py @@ -1,5 +1,5 @@ # schema.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/__init__.py b/lib/sqlalchemy/sql/__init__.py index 26774416185..70583a910b0 100644 --- a/lib/sqlalchemy/sql/__init__.py +++ b/lib/sqlalchemy/sql/__init__.py @@ -1,5 +1,5 @@ # sql/__init__.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/annotation.py b/lib/sqlalchemy/sql/annotation.py index 01b5a53a6e3..60e600ddf0b 100644 --- a/lib/sqlalchemy/sql/annotation.py +++ b/lib/sqlalchemy/sql/annotation.py @@ -1,5 +1,5 @@ # sql/annotation.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/base.py b/lib/sqlalchemy/sql/base.py index 4519e649ba6..4300b4ef42f 100644 --- a/lib/sqlalchemy/sql/base.py +++ 
b/lib/sqlalchemy/sql/base.py @@ -1,5 +1,5 @@ # sql/base.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/coercions.py b/lib/sqlalchemy/sql/coercions.py index ede488915ec..0ba52c40229 100644 --- a/lib/sqlalchemy/sql/coercions.py +++ b/lib/sqlalchemy/sql/coercions.py @@ -1,5 +1,5 @@ # sql/coercions.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index cb30c777389..a8d0674604c 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -1,5 +1,5 @@ # sql/compiler.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/crud.py b/lib/sqlalchemy/sql/crud.py index 920c8b35687..48ab7212861 100644 --- a/lib/sqlalchemy/sql/crud.py +++ b/lib/sqlalchemy/sql/crud.py @@ -1,5 +1,5 @@ # sql/crud.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/ddl.py b/lib/sqlalchemy/sql/ddl.py index e608052f35f..bed64a56701 100644 --- a/lib/sqlalchemy/sql/ddl.py +++ b/lib/sqlalchemy/sql/ddl.py @@ -1,5 +1,5 @@ # sql/ddl.py -# Copyright (C) 2009-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2009-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/default_comparator.py b/lib/sqlalchemy/sql/default_comparator.py index 
70586c696f0..73a1c0351b4 100644 --- a/lib/sqlalchemy/sql/default_comparator.py +++ b/lib/sqlalchemy/sql/default_comparator.py @@ -1,5 +1,5 @@ # sql/default_comparator.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/dml.py b/lib/sqlalchemy/sql/dml.py index ae48740000e..ae261830169 100644 --- a/lib/sqlalchemy/sql/dml.py +++ b/lib/sqlalchemy/sql/dml.py @@ -1,5 +1,5 @@ # sql/dml.py -# Copyright (C) 2009-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2009-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index d438e5995cd..a89273e4da7 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -1,5 +1,5 @@ # sql/elements.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/events.py b/lib/sqlalchemy/sql/events.py index 872d17cac43..0e6a9d10320 100644 --- a/lib/sqlalchemy/sql/events.py +++ b/lib/sqlalchemy/sql/events.py @@ -1,5 +1,5 @@ # sqlalchemy/sql/events.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/expression.py b/lib/sqlalchemy/sql/expression.py index b4aa14e1f9d..3b7f04c3f7c 100644 --- a/lib/sqlalchemy/sql/expression.py +++ b/lib/sqlalchemy/sql/expression.py @@ -1,5 +1,5 @@ # sql/expression.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of 
SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/functions.py b/lib/sqlalchemy/sql/functions.py index 96f2a3accfa..a15d765d7b7 100644 --- a/lib/sqlalchemy/sql/functions.py +++ b/lib/sqlalchemy/sql/functions.py @@ -1,5 +1,5 @@ # sql/functions.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/lambdas.py b/lib/sqlalchemy/sql/lambdas.py index 236427d9df6..b574f83ef92 100644 --- a/lib/sqlalchemy/sql/lambdas.py +++ b/lib/sqlalchemy/sql/lambdas.py @@ -1,5 +1,5 @@ # sql/lambdas.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/naming.py b/lib/sqlalchemy/sql/naming.py index b7ad221d2c8..1792f4a46c6 100644 --- a/lib/sqlalchemy/sql/naming.py +++ b/lib/sqlalchemy/sql/naming.py @@ -1,5 +1,5 @@ # sqlalchemy/naming.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/operators.py b/lib/sqlalchemy/sql/operators.py index 806d41eaf88..8fd851d1561 100644 --- a/lib/sqlalchemy/sql/operators.py +++ b/lib/sqlalchemy/sql/operators.py @@ -1,5 +1,5 @@ # sql/operators.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/roles.py b/lib/sqlalchemy/sql/roles.py index 9e146f7ff1a..e9412b40ee6 100644 --- a/lib/sqlalchemy/sql/roles.py +++ b/lib/sqlalchemy/sql/roles.py @@ -1,5 +1,5 @@ # sql/roles.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright 
(C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py index 8198a829839..2be14bbf201 100644 --- a/lib/sqlalchemy/sql/schema.py +++ b/lib/sqlalchemy/sql/schema.py @@ -1,5 +1,5 @@ # sql/schema.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index f8252cbe468..54f80273ed0 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -1,5 +1,5 @@ # sql/selectable.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index c2b8bbbe4a0..84239c70869 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ -1,5 +1,5 @@ # sql/sqltypes.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/type_api.py b/lib/sqlalchemy/sql/type_api.py index 8c786968e01..b404b41a5e1 100644 --- a/lib/sqlalchemy/sql/type_api.py +++ b/lib/sqlalchemy/sql/type_api.py @@ -1,5 +1,5 @@ # sql/types_api.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/util.py b/lib/sqlalchemy/sql/util.py index 1a5143fa586..d520b0c6f13 100644 --- a/lib/sqlalchemy/sql/util.py +++ b/lib/sqlalchemy/sql/util.py @@ -1,5 +1,5 @@ # sql/util.py -# 
Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/visitors.py b/lib/sqlalchemy/sql/visitors.py index f72d83a4c74..42307b3bb2a 100644 --- a/lib/sqlalchemy/sql/visitors.py +++ b/lib/sqlalchemy/sql/visitors.py @@ -1,5 +1,5 @@ # sql/visitors.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/__init__.py b/lib/sqlalchemy/testing/__init__.py index 7d47210452d..bfd8cc3dc36 100644 --- a/lib/sqlalchemy/testing/__init__.py +++ b/lib/sqlalchemy/testing/__init__.py @@ -1,5 +1,5 @@ # testing/__init__.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/assertions.py b/lib/sqlalchemy/testing/assertions.py index 9a022265eb1..5c646790bf0 100644 --- a/lib/sqlalchemy/testing/assertions.py +++ b/lib/sqlalchemy/testing/assertions.py @@ -1,5 +1,5 @@ # testing/assertions.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/assertsql.py b/lib/sqlalchemy/testing/assertsql.py index 565b3ed7675..e380223a158 100644 --- a/lib/sqlalchemy/testing/assertsql.py +++ b/lib/sqlalchemy/testing/assertsql.py @@ -1,5 +1,5 @@ # testing/assertsql.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/asyncio.py 
b/lib/sqlalchemy/testing/asyncio.py index 21890604a37..ccc219f68aa 100644 --- a/lib/sqlalchemy/testing/asyncio.py +++ b/lib/sqlalchemy/testing/asyncio.py @@ -1,5 +1,5 @@ # testing/asyncio.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/config.py b/lib/sqlalchemy/testing/config.py index 7d19b99be5e..e61bf2694a1 100644 --- a/lib/sqlalchemy/testing/config.py +++ b/lib/sqlalchemy/testing/config.py @@ -1,5 +1,5 @@ # testing/config.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/engines.py b/lib/sqlalchemy/testing/engines.py index b8be6b9bd55..29d129fecf2 100644 --- a/lib/sqlalchemy/testing/engines.py +++ b/lib/sqlalchemy/testing/engines.py @@ -1,5 +1,5 @@ # testing/engines.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/entities.py b/lib/sqlalchemy/testing/entities.py index 8ea65d66933..b1c278a5e1a 100644 --- a/lib/sqlalchemy/testing/entities.py +++ b/lib/sqlalchemy/testing/entities.py @@ -1,5 +1,5 @@ # testing/entities.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/exclusions.py b/lib/sqlalchemy/testing/exclusions.py index 521a4aa7be7..d4632799406 100644 --- a/lib/sqlalchemy/testing/exclusions.py +++ b/lib/sqlalchemy/testing/exclusions.py @@ -1,5 +1,5 @@ # testing/exclusions.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and 
contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/fixtures.py b/lib/sqlalchemy/testing/fixtures.py index 999647b5b19..4d3de1d82a7 100644 --- a/lib/sqlalchemy/testing/fixtures.py +++ b/lib/sqlalchemy/testing/fixtures.py @@ -1,5 +1,5 @@ # testing/fixtures.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/mock.py b/lib/sqlalchemy/testing/mock.py index e333c7007ef..533d31bb16e 100644 --- a/lib/sqlalchemy/testing/mock.py +++ b/lib/sqlalchemy/testing/mock.py @@ -1,5 +1,5 @@ # testing/mock.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/pickleable.py b/lib/sqlalchemy/testing/pickleable.py index f05960c839a..ec788b62321 100644 --- a/lib/sqlalchemy/testing/pickleable.py +++ b/lib/sqlalchemy/testing/pickleable.py @@ -1,5 +1,5 @@ # testing/pickleable.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/plugin/plugin_base.py b/lib/sqlalchemy/testing/plugin/plugin_base.py index c941332f311..e594f3736ae 100644 --- a/lib/sqlalchemy/testing/plugin/plugin_base.py +++ b/lib/sqlalchemy/testing/plugin/plugin_base.py @@ -1,5 +1,5 @@ # plugin/plugin_base.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/profiling.py 
b/lib/sqlalchemy/testing/profiling.py index 8c4d9a48412..eba23547f57 100644 --- a/lib/sqlalchemy/testing/profiling.py +++ b/lib/sqlalchemy/testing/profiling.py @@ -1,5 +1,5 @@ # testing/profiling.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/requirements.py b/lib/sqlalchemy/testing/requirements.py index e225512889f..0e8eec52d69 100644 --- a/lib/sqlalchemy/testing/requirements.py +++ b/lib/sqlalchemy/testing/requirements.py @@ -1,5 +1,5 @@ # testing/requirements.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/schema.py b/lib/sqlalchemy/testing/schema.py index bff07a5c900..a6d098dd5d1 100644 --- a/lib/sqlalchemy/testing/schema.py +++ b/lib/sqlalchemy/testing/schema.py @@ -1,5 +1,5 @@ # testing/schema.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/util.py b/lib/sqlalchemy/testing/util.py index 9baf1014b0e..e187de91bf1 100644 --- a/lib/sqlalchemy/testing/util.py +++ b/lib/sqlalchemy/testing/util.py @@ -1,5 +1,5 @@ # testing/util.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/warnings.py b/lib/sqlalchemy/testing/warnings.py index 3e783872d62..762b0703919 100644 --- a/lib/sqlalchemy/testing/warnings.py +++ b/lib/sqlalchemy/testing/warnings.py @@ -1,5 +1,5 @@ # testing/warnings.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and 
contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/types.py b/lib/sqlalchemy/types.py index 07263c5b9ee..ed62ddd989e 100644 --- a/lib/sqlalchemy/types.py +++ b/lib/sqlalchemy/types.py @@ -1,5 +1,5 @@ # types.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/__init__.py b/lib/sqlalchemy/util/__init__.py index c34f73dcaef..7a514e9f12e 100644 --- a/lib/sqlalchemy/util/__init__.py +++ b/lib/sqlalchemy/util/__init__.py @@ -1,5 +1,5 @@ # util/__init__.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/_collections.py b/lib/sqlalchemy/util/_collections.py index 8e218303be7..b6ea7fd4f4a 100644 --- a/lib/sqlalchemy/util/_collections.py +++ b/lib/sqlalchemy/util/_collections.py @@ -1,5 +1,5 @@ # util/_collections.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/_compat_py3k.py b/lib/sqlalchemy/util/_compat_py3k.py index ce659a41d14..0d31250c9a3 100644 --- a/lib/sqlalchemy/util/_compat_py3k.py +++ b/lib/sqlalchemy/util/_compat_py3k.py @@ -1,5 +1,5 @@ # util/_compat_py3k.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/_concurrency_py3k.py b/lib/sqlalchemy/util/_concurrency_py3k.py index 0b128344d64..17f6debb0b3 100644 --- 
a/lib/sqlalchemy/util/_concurrency_py3k.py +++ b/lib/sqlalchemy/util/_concurrency_py3k.py @@ -1,5 +1,5 @@ # util/_concurrency_py3k.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/_preloaded.py b/lib/sqlalchemy/util/_preloaded.py index 1803de40227..2a859feeee8 100644 --- a/lib/sqlalchemy/util/_preloaded.py +++ b/lib/sqlalchemy/util/_preloaded.py @@ -1,5 +1,5 @@ # util/_preloaded.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/compat.py b/lib/sqlalchemy/util/compat.py index 460d7161c5a..10a979db16a 100644 --- a/lib/sqlalchemy/util/compat.py +++ b/lib/sqlalchemy/util/compat.py @@ -1,5 +1,5 @@ # util/compat.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/concurrency.py b/lib/sqlalchemy/util/concurrency.py index e900b437e7f..59a1a747a57 100644 --- a/lib/sqlalchemy/util/concurrency.py +++ b/lib/sqlalchemy/util/concurrency.py @@ -1,5 +1,5 @@ # util/concurrency.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/deprecations.py b/lib/sqlalchemy/util/deprecations.py index ca346ee0e2c..8243aeb65d1 100644 --- a/lib/sqlalchemy/util/deprecations.py +++ b/lib/sqlalchemy/util/deprecations.py @@ -1,5 +1,5 @@ # util/deprecations.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors 
# # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/langhelpers.py b/lib/sqlalchemy/util/langhelpers.py index c3636f0aba0..e4924e4ea06 100644 --- a/lib/sqlalchemy/util/langhelpers.py +++ b/lib/sqlalchemy/util/langhelpers.py @@ -1,5 +1,5 @@ # util/langhelpers.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/queue.py b/lib/sqlalchemy/util/queue.py index 67c5219c724..77646454182 100644 --- a/lib/sqlalchemy/util/queue.py +++ b/lib/sqlalchemy/util/queue.py @@ -1,5 +1,5 @@ # util/queue.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/topological.py b/lib/sqlalchemy/util/topological.py index bbc819fc317..2235c2728dd 100644 --- a/lib/sqlalchemy/util/topological.py +++ b/lib/sqlalchemy/util/topological.py @@ -1,5 +1,5 @@ # util/topological.py -# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under From f26207c7307f3449e0235a264db0fcf5b089ff73 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 3 Jan 2023 14:54:14 -0500 Subject: [PATCH 494/632] changelog updates Change-Id: I69581c1563f15762a9a9afae4741e23fefd76906 --- doc/build/changelog/unreleased_14/8983.rst | 17 +++++++++++++++++ doc/build/changelog/unreleased_14/8989.rst | 2 +- doc/build/changelog/unreleased_14/9023.rst | 14 +++++++------- 3 files changed, 25 insertions(+), 8 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/8983.rst diff --git a/doc/build/changelog/unreleased_14/8983.rst b/doc/build/changelog/unreleased_14/8983.rst new file mode 100644 index 
00000000000..bfea2ea2a52 --- /dev/null +++ b/doc/build/changelog/unreleased_14/8983.rst @@ -0,0 +1,17 @@ +.. change:: + :tags: change, general + :tickets: 8983 + + A new deprecation "uber warning" is now emitted at once at runtime, the + first time any SQLAlchemy 2.0 deprecation warning would normally be + emitted, but the ``SQLALCHEMY_WARN_20`` environment variable is not set. + This deprecation warning intends to notify users who may not have set an + appropriate constraint in their requirements files to block against a + surprise SQLAlchemy 2.0 upgrade and also alert that the SQLAlchemy 2.0 + upgrade process is available, as the first full 2.0 release is expected + very soon. The deprecation warning can be silenced by setting the + environment variable ``SQLALCHEMY_SILENCE_UBER_WARNING`` to ``"1"``. + + .. seealso:: + + :ref:`migration_20_toplevel` diff --git a/doc/build/changelog/unreleased_14/8989.rst b/doc/build/changelog/unreleased_14/8989.rst index 4c38fdf0190..2287bec7b4d 100644 --- a/doc/build/changelog/unreleased_14/8989.rst +++ b/doc/build/changelog/unreleased_14/8989.rst @@ -1,5 +1,5 @@ .. change:: - :tags: bug, types + :tags: bug, sql :tickets: 8989 :versions: 2.0.0b5 diff --git a/doc/build/changelog/unreleased_14/9023.rst b/doc/build/changelog/unreleased_14/9023.rst index 3bbbd0f3fbe..8a2d0b9ca37 100644 --- a/doc/build/changelog/unreleased_14/9023.rst +++ b/doc/build/changelog/unreleased_14/9023.rst @@ -4,10 +4,10 @@ :versions: 2.0.0rc1 Fixed bug where the PostgreSQL - :paramref:`_postgresql.OnConflictClause.constraint` parameter would accept - an :class:`.Index` object, however would not expand this index out into its - individual index expressions, instead rendering its name in an ON CONFLICT - ON CONSTRAINT clause, which is not accepted by PostgreSQL; the "constraint - name" form only accepts unique or exclude constraint names. The parameter - continues to accept the index but now expands it out into its component - expressions for the render. 
+ :paramref:`_postgresql.Insert.on_conflict_do_update.constraint` parameter + would accept an :class:`.Index` object, however would not expand this index + out into its individual index expressions, instead rendering its name in an + ON CONFLICT ON CONSTRAINT clause, which is not accepted by PostgreSQL; the + "constraint name" form only accepts unique or exclude constraint names. The + parameter continues to accept the index but now expands it out into its + component expressions for the render. From 02bae22e6dbaa63ca689fa5c2925e89e56e3fc52 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 3 Jan 2023 15:01:40 -0500 Subject: [PATCH 495/632] update wording Change-Id: I19c7d9683218d2c059aa36dd1743661e78974976 --- doc/build/changelog/unreleased_14/8983.rst | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/unreleased_14/8983.rst b/doc/build/changelog/unreleased_14/8983.rst index bfea2ea2a52..35f03dbb78d 100644 --- a/doc/build/changelog/unreleased_14/8983.rst +++ b/doc/build/changelog/unreleased_14/8983.rst @@ -2,9 +2,12 @@ :tags: change, general :tickets: 8983 - A new deprecation "uber warning" is now emitted at once at runtime, the + A new deprecation "uber warning" is now emitted at runtime the first time any SQLAlchemy 2.0 deprecation warning would normally be emitted, but the ``SQLALCHEMY_WARN_20`` environment variable is not set. + The warning emits only once at most, before setting a boolean to prevent + it from emitting a second time. 
+ This deprecation warning intends to notify users who may not have set an appropriate constraint in their requirements files to block against a surprise SQLAlchemy 2.0 upgrade and also alert that the SQLAlchemy 2.0 From b3bb55aaea9ee94e50f8db594d65c2bfcdb314ef Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 3 Jan 2023 15:02:32 -0500 Subject: [PATCH 496/632] - 1.4.46 --- doc/build/changelog/changelog_14.rst | 152 +++++++++++++++++- doc/build/changelog/unreleased_14/8393.rst | 7 - doc/build/changelog/unreleased_14/8969.rst | 10 -- doc/build/changelog/unreleased_14/8974.rst | 20 --- doc/build/changelog/unreleased_14/8983.rst | 20 --- doc/build/changelog/unreleased_14/8989.rst | 10 -- doc/build/changelog/unreleased_14/8995.rst | 10 -- doc/build/changelog/unreleased_14/9002.rst | 11 -- doc/build/changelog/unreleased_14/9009.rst | 12 -- doc/build/changelog/unreleased_14/9023.rst | 13 -- doc/build/changelog/unreleased_14/9029.rst | 9 -- doc/build/changelog/unreleased_14/9033.rst | 9 -- doc/build/changelog/unreleased_14/tox_fix.rst | 7 - doc/build/conf.py | 4 +- 14 files changed, 153 insertions(+), 141 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/8393.rst delete mode 100644 doc/build/changelog/unreleased_14/8969.rst delete mode 100644 doc/build/changelog/unreleased_14/8974.rst delete mode 100644 doc/build/changelog/unreleased_14/8983.rst delete mode 100644 doc/build/changelog/unreleased_14/8989.rst delete mode 100644 doc/build/changelog/unreleased_14/8995.rst delete mode 100644 doc/build/changelog/unreleased_14/9002.rst delete mode 100644 doc/build/changelog/unreleased_14/9009.rst delete mode 100644 doc/build/changelog/unreleased_14/9023.rst delete mode 100644 doc/build/changelog/unreleased_14/9029.rst delete mode 100644 doc/build/changelog/unreleased_14/9033.rst delete mode 100644 doc/build/changelog/unreleased_14/tox_fix.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 790f8a8440a..d1ee523a1f8 
100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,157 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.46 - :include_notes_from: unreleased_14 + :released: January 3, 2023 + + .. change:: + :tags: bug, engine + :tickets: 8974 + :versions: 2.0.0rc1 + + Fixed a long-standing race condition in the connection pool which could + occur under eventlet/gevent monkeypatching schemes in conjunction with the + use of eventlet/gevent ``Timeout`` conditions, where a connection pool + checkout that's interrupted due to the timeout would fail to clean up the + failed state, causing the underlying connection record and sometimes the + database connection itself to "leak", leaving the pool in an invalid state + with unreachable entries. This issue was first identified and fixed in + SQLAlchemy 1.2 for :ticket:`4225`, however the failure modes detected in + that fix failed to accommodate for ``BaseException``, rather than + ``Exception``, which prevented eventlet/gevent ``Timeout`` from being + caught. In addition, a block within initial pool connect has also been + identified and hardened with a ``BaseException`` -> "clean failed connect" + block to accommodate for the same condition in this location. + Big thanks to Github user @niklaus for their tenacious efforts in + identifying and describing this intricate issue. + + .. change:: + :tags: bug, postgresql + :tickets: 9023 + :versions: 2.0.0rc1 + + Fixed bug where the PostgreSQL + :paramref:`_postgresql.Insert.on_conflict_do_update.constraint` parameter + would accept an :class:`.Index` object, however would not expand this index + out into its individual index expressions, instead rendering its name in an + ON CONFLICT ON CONSTRAINT clause, which is not accepted by PostgreSQL; the + "constraint name" form only accepts unique or exclude constraint names. 
The + parameter continues to accept the index but now expands it out into its + component expressions for the render. + + .. change:: + :tags: bug, general + :tickets: 8995 + :versions: 2.0.0rc1 + + Fixed regression where the base compat module was calling upon + ``platform.architecture()`` in order to detect some system properties, + which results in an over-broad system call against the system-level + ``file`` call that is unavailable under some circumstances, including + within some secure environment configurations. + + .. change:: + :tags: usecase, postgresql + :tickets: 8393 + :versions: 2.0.0b5 + + Added the PostgreSQL type ``MACADDR8``. + Pull request courtesy of Asim Farooq. + + .. change:: + :tags: bug, sqlite + :tickets: 8969 + :versions: 2.0.0b5 + + Fixed regression caused by new support for reflection of partial indexes on + SQLite added in 1.4.45 for :ticket:`8804`, where the ``index_list`` pragma + command in very old versions of SQLite (possibly prior to 3.8.9) does not + return the current expected number of columns, leading to exceptions raised + when reflecting tables and indexes. + + .. change:: + :tags: bug, tests + :versions: 2.0.0rc1 + + Fixed issue in tox.ini file where changes in the tox 4.0 series to the + format of "passenv" caused tox to not function correctly, in particular + raising an error as of tox 4.0.6. + + .. change:: + :tags: bug, tests + :tickets: 9002 + :versions: 2.0.0rc1 + + Added new exclusion rule for third party dialects called + ``unusual_column_name_characters``, which can be "closed" for third party + dialects that don't support column names with unusual characters such as + dots, slashes, or percent signs in them, even if the name is properly + quoted. + + + .. 
change:: + :tags: bug, sql + :tickets: 9009 + :versions: 2.0.0b5 + + Added parameter + :paramref:`.FunctionElement.column_valued.joins_implicitly`, which is + useful in preventing the "cartesian product" warning when making use of + table-valued or column-valued functions. This parameter was already + introduced for :meth:`.FunctionElement.table_valued` in :ticket:`7845`, + however it failed to be added for :meth:`.FunctionElement.column_valued` + as well. + + .. change:: + :tags: change, general + :tickets: 8983 + + A new deprecation "uber warning" is now emitted at runtime the + first time any SQLAlchemy 2.0 deprecation warning would normally be + emitted, but the ``SQLALCHEMY_WARN_20`` environment variable is not set. + The warning emits only once at most, before setting a boolean to prevent + it from emitting a second time. + + This deprecation warning intends to notify users who may not have set an + appropriate constraint in their requirements files to block against a + surprise SQLAlchemy 2.0 upgrade and also alert that the SQLAlchemy 2.0 + upgrade process is available, as the first full 2.0 release is expected + very soon. The deprecation warning can be silenced by setting the + environment variable ``SQLALCHEMY_SILENCE_UBER_WARNING`` to ``"1"``. + + .. seealso:: + + :ref:`migration_20_toplevel` + + .. change:: + :tags: bug, orm + :tickets: 9033 + :versions: 2.0.0rc1 + + Fixed issue in the internal SQL traversal for DML statements like + :class:`_dml.Update` and :class:`_dml.Delete` which would cause among other + potential issues, a specific issue using lambda statements with the ORM + update/delete feature. + + .. change:: + :tags: bug, sql + :tickets: 8989 + :versions: 2.0.0b5 + + Fixed bug where SQL compilation would fail (assertion fail in 2.0, NoneType + error in 1.4) when using an expression whose type included + :meth:`_types.TypeEngine.bind_expression`, in the context of an "expanding" + (i.e. 
"IN") parameter in conjunction with the ``literal_binds`` compiler + parameter. + + .. change:: + :tags: bug, sql + :tickets: 9029 + :versions: 2.0.0rc1 + + Fixed issue in lambda SQL feature where the calculated type of a literal + value would not take into account the type coercion rules of the "compared + to type", leading to a lack of typing information for SQL expressions, such + as comparisons to :class:`_types.JSON` elements and similar. .. changelog:: :version: 1.4.45 diff --git a/doc/build/changelog/unreleased_14/8393.rst b/doc/build/changelog/unreleased_14/8393.rst deleted file mode 100644 index fab9eb04c7a..00000000000 --- a/doc/build/changelog/unreleased_14/8393.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: usecase, postgresql - :tickets: 8393 - :versions: 2.0.0b5 - - Added the PostgreSQL type ``MACADDR8``. - Pull request courtesy of Asim Farooq. diff --git a/doc/build/changelog/unreleased_14/8969.rst b/doc/build/changelog/unreleased_14/8969.rst deleted file mode 100644 index 8458706c809..00000000000 --- a/doc/build/changelog/unreleased_14/8969.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. change:: - :tags: bug, sqlite - :tickets: 8969 - :versions: 2.0.0b5 - - Fixed regression caused by new support for reflection of partial indexes on - SQLite added in 1.4.45 for :ticket:`8804`, where the ``index_list`` pragma - command in very old versions of SQLite (possibly prior to 3.8.9) does not - return the current expected number of columns, leading to exceptions raised - when reflecting tables and indexes. diff --git a/doc/build/changelog/unreleased_14/8974.rst b/doc/build/changelog/unreleased_14/8974.rst deleted file mode 100644 index a8ae491ad19..00000000000 --- a/doc/build/changelog/unreleased_14/8974.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. 
change:: - :tags: bug, engine - :tickets: 8974 - :versions: 2.0.0rc1 - - Fixed a long-standing race condition in the connection pool which could - occur under eventlet/gevent monkeypatching schemes in conjunction with the - use of eventlet/gevent ``Timeout`` conditions, where a connection pool - checkout that's interrupted due to the timeout would fail to clean up the - failed state, causing the underlying connection record and sometimes the - database connection itself to "leak", leaving the pool in an invalid state - with unreachable entries. This issue was first identified and fixed in - SQLAlchemy 1.2 for :ticket:`4225`, however the failure modes detected in - that fix failed to accommodate for ``BaseException``, rather than - ``Exception``, which prevented eventlet/gevent ``Timeout`` from being - caught. In addition, a block within initial pool connect has also been - identified and hardened with a ``BaseException`` -> "clean failed connect" - block to accommodate for the same condition in this location. - Big thanks to Github user @niklaus for their tenacious efforts in - identifying and describing this intricate issue. diff --git a/doc/build/changelog/unreleased_14/8983.rst b/doc/build/changelog/unreleased_14/8983.rst deleted file mode 100644 index 35f03dbb78d..00000000000 --- a/doc/build/changelog/unreleased_14/8983.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. change:: - :tags: change, general - :tickets: 8983 - - A new deprecation "uber warning" is now emitted at runtime the - first time any SQLAlchemy 2.0 deprecation warning would normally be - emitted, but the ``SQLALCHEMY_WARN_20`` environment variable is not set. - The warning emits only once at most, before setting a boolean to prevent - it from emitting a second time. 
- - This deprecation warning intends to notify users who may not have set an - appropriate constraint in their requirements files to block against a - surprise SQLAlchemy 2.0 upgrade and also alert that the SQLAlchemy 2.0 - upgrade process is available, as the first full 2.0 release is expected - very soon. The deprecation warning can be silenced by setting the - environment variable ``SQLALCHEMY_SILENCE_UBER_WARNING`` to ``"1"``. - - .. seealso:: - - :ref:`migration_20_toplevel` diff --git a/doc/build/changelog/unreleased_14/8989.rst b/doc/build/changelog/unreleased_14/8989.rst deleted file mode 100644 index 2287bec7b4d..00000000000 --- a/doc/build/changelog/unreleased_14/8989.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 8989 - :versions: 2.0.0b5 - - Fixed bug where SQL compilation would fail (assertion fail in 2.0, NoneType - error in 1.4) when using an expression whose type included - :meth:`_types.TypeEngine.bind_expression`, in the context of an "expanding" - (i.e. "IN") parameter in conjunction with the ``literal_binds`` compiler - parameter. diff --git a/doc/build/changelog/unreleased_14/8995.rst b/doc/build/changelog/unreleased_14/8995.rst deleted file mode 100644 index 034e7b3f7cd..00000000000 --- a/doc/build/changelog/unreleased_14/8995.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. change:: - :tags: bug, general - :tickets: 8995 - :versions: 2.0.0rc1 - - Fixed regression where the base compat module was calling upon - ``platform.architecture()`` in order to detect some system properties, - which results in an over-broad system call against the system-level - ``file`` call that is unavailable under some circumstances, including - within some secure environment configurations. diff --git a/doc/build/changelog/unreleased_14/9002.rst b/doc/build/changelog/unreleased_14/9002.rst deleted file mode 100644 index 61d491410d4..00000000000 --- a/doc/build/changelog/unreleased_14/9002.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. 
change:: - :tags: bug, tests - :tickets: 9002 - :versions: 2.0.0rc1 - - Added new exclusion rule for third party dialects called - ``unusual_column_name_characters``, which can be "closed" for third party - dialects that don't support column names with unusual characters such as - dots, slashes, or percent signs in them, even if the name is properly - quoted. - diff --git a/doc/build/changelog/unreleased_14/9009.rst b/doc/build/changelog/unreleased_14/9009.rst deleted file mode 100644 index 9520b3e133c..00000000000 --- a/doc/build/changelog/unreleased_14/9009.rst +++ /dev/null @@ -1,12 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 9009 - :versions: 2.0.0b5 - - Added parameter - :paramref:`.FunctionElement.column_valued.joins_implicitly`, which is - useful in preventing the "cartesian product" warning when making use of - table-valued or column-valued functions. This parameter was already - introduced for :meth:`.FunctionElement.table_valued` in :ticket:`7845`, - however it failed to be added for :meth:`.FunctionElement.column_valued` - as well. diff --git a/doc/build/changelog/unreleased_14/9023.rst b/doc/build/changelog/unreleased_14/9023.rst deleted file mode 100644 index 8a2d0b9ca37..00000000000 --- a/doc/build/changelog/unreleased_14/9023.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. change:: - :tags: bug, postgresql - :tickets: 9023 - :versions: 2.0.0rc1 - - Fixed bug where the PostgreSQL - :paramref:`_postgresql.Insert.on_conflict_do_update.constraint` parameter - would accept an :class:`.Index` object, however would not expand this index - out into its individual index expressions, instead rendering its name in an - ON CONFLICT ON CONSTRAINT clause, which is not accepted by PostgreSQL; the - "constraint name" form only accepts unique or exclude constraint names. The - parameter continues to accept the index but now expands it out into its - component expressions for the render. 
diff --git a/doc/build/changelog/unreleased_14/9029.rst b/doc/build/changelog/unreleased_14/9029.rst deleted file mode 100644 index dd3c24ee4cb..00000000000 --- a/doc/build/changelog/unreleased_14/9029.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 9029 - :versions: 2.0.0rc1 - - Fixed issue in lambda SQL feature where the calculated type of a literal - value would not take into account the type coercion rules of the "compared - to type", leading to a lack of typing information for SQL expressions, such - as comparisons to :class:`_types.JSON` elements and similar. diff --git a/doc/build/changelog/unreleased_14/9033.rst b/doc/build/changelog/unreleased_14/9033.rst deleted file mode 100644 index e0e39db3887..00000000000 --- a/doc/build/changelog/unreleased_14/9033.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 9033 - :versions: 2.0.0rc1 - - Fixed issue in the internal SQL traversal for DML statements like - :class:`_dml.Update` and :class:`_dml.Delete` which would cause among other - potential issues, a specific issue using lambda statements with the ORM - update/delete feature. diff --git a/doc/build/changelog/unreleased_14/tox_fix.rst b/doc/build/changelog/unreleased_14/tox_fix.rst deleted file mode 100644 index 7d9b799e01d..00000000000 --- a/doc/build/changelog/unreleased_14/tox_fix.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: bug, tests - :versions: 2.0.0rc1 - - Fixed issue in tox.ini file where changes in the tox 4.0 series to the - format of "passenv" caused tox to not function correctly, in particular - raising an error as of tox 4.0.6. diff --git a/doc/build/conf.py b/doc/build/conf.py index 737026ef281..7c51ff06133 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -213,9 +213,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. 
-release = "1.4.45" +release = "1.4.46" -release_date = "December 10, 2022" +release_date = "January 3, 2023" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From 0897f86d10695477b57c50e2d7bc0bd6571a04af Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 3 Jan 2023 15:17:21 -0500 Subject: [PATCH 497/632] Version 1.4.47 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index d1ee523a1f8..3f0027fa146 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.47 + :include_notes_from: unreleased_14 + .. changelog:: :version: 1.4.46 :released: January 3, 2023 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index 640872ea08b..3b356f9598f 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.46" +__version__ = "1.4.47" def __go(lcls): From 152995c0772d3d74161ddd830aa6f1509d1dd8dd Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 4 Jan 2023 09:23:07 -0500 Subject: [PATCH 498/632] include parsed col length field as integer from mysql index reflection Added support to MySQL index reflection to correctly reflect the ``mysql_length`` dictionary, which previously was being ignored. 
Fixes: #9047 Change-Id: I0a5e27123be68741e12af4464a0fa305052ec36e (cherry picked from commit 4a31f97824095610cfdbc1ac1180fd8690f9f477) --- doc/build/changelog/unreleased_14/9047.rst | 7 ++ lib/sqlalchemy/dialects/mysql/base.py | 12 ++- lib/sqlalchemy/dialects/mysql/reflection.py | 7 +- test/dialect/mysql/test_reflection.py | 87 +++++++++++++++++++++ 4 files changed, 110 insertions(+), 3 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/9047.rst diff --git a/doc/build/changelog/unreleased_14/9047.rst b/doc/build/changelog/unreleased_14/9047.rst new file mode 100644 index 00000000000..74110890e8b --- /dev/null +++ b/doc/build/changelog/unreleased_14/9047.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: usecase, mysql + :tickets: 9047 + :versions: 2.0.0 + + Added support to MySQL index reflection to correctly reflect the + ``mysql_length`` dictionary, which previously was being ignored. diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index 17916e40448..9948602d3db 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -3033,14 +3033,22 @@ def get_indexes(self, connection, table_name, schema=None, **kw): ] index_d = {} - if dialect_options: - index_d["dialect_options"] = dialect_options index_d["name"] = spec["name"] index_d["column_names"] = [s[0] for s in spec["columns"]] + mysql_length = { + s[0]: s[1] for s in spec["columns"] if s[1] is not None + } + if mysql_length: + dialect_options["%s_length" % self.name] = mysql_length + index_d["unique"] = unique if flavor: index_d["type"] = flavor + + if dialect_options: + index_d["dialect_options"] = dialect_options + indexes.append(index_d) return indexes diff --git a/lib/sqlalchemy/dialects/mysql/reflection.py b/lib/sqlalchemy/dialects/mysql/reflection.py index fae9a387e55..7a4a46b3475 100644 --- a/lib/sqlalchemy/dialects/mysql/reflection.py +++ b/lib/sqlalchemy/dialects/mysql/reflection.py @@ -322,7 +322,12 @@ def 
_describe_to_create(self, table_name, columns): def _parse_keyexprs(self, identifiers): """Unpack '"col"(2),"col" ASC'-ish strings into components.""" - return self._re_keyexprs.findall(identifiers) + return [ + (colname, int(length) if length else None, modifiers) + for colname, length, modifiers in self._re_keyexprs.findall( + identifiers + ) + ] def _prep_regexes(self): """Pre-compile regular expressions.""" diff --git a/test/dialect/mysql/test_reflection.py b/test/dialect/mysql/test_reflection.py index 529d352a2ae..a297145abe7 100644 --- a/test/dialect/mysql/test_reflection.py +++ b/test/dialect/mysql/test_reflection.py @@ -759,6 +759,93 @@ def test_reflect_fulltext(self, metadata, connection): "CREATE FULLTEXT INDEX textdata_ix ON mytable (textdata)", ) + def test_reflect_index_col_length(self, metadata, connection): + """test for #9047""" + + tt = Table( + "test_table", + metadata, + Column("signal_type", Integer(), nullable=False), + Column("signal_data", String(200), nullable=False), + Column("signal_data_2", String(200), nullable=False), + Index( + "ix_1", + "signal_type", + "signal_data", + mysql_length={"signal_data": 25}, + mariadb_length={"signal_data": 25}, + ), + ) + Index( + "ix_2", + tt.c.signal_type, + tt.c.signal_data, + tt.c.signal_data_2, + mysql_length={"signal_data": 25, "signal_data_2": 10}, + mariadb_length={"signal_data": 25, "signal_data_2": 10}, + ) + + mysql_length = ( + "mysql_length" + if not connection.dialect.is_mariadb + else "mariadb_length" + ) + eq_( + {idx.name: idx.kwargs[mysql_length] for idx in tt.indexes}, + { + "ix_1": {"signal_data": 25}, + "ix_2": {"signal_data": 25, "signal_data_2": 10}, + }, + ) + + metadata.create_all(connection) + + eq_( + sorted( + inspect(connection).get_indexes("test_table"), + key=lambda rec: rec["name"], + ), + [ + { + "name": "ix_1", + "column_names": ["signal_type", "signal_data"], + "unique": False, + "dialect_options": {mysql_length: {"signal_data": 25}}, + }, + { + "name": "ix_2", + 
"column_names": [ + "signal_type", + "signal_data", + "signal_data_2", + ], + "unique": False, + "dialect_options": { + mysql_length: { + "signal_data": 25, + "signal_data_2": 10, + } + }, + }, + ], + ) + + new_metadata = MetaData() + reflected_table = Table( + "test_table", new_metadata, autoload_with=connection + ) + + eq_( + { + idx.name: idx.kwargs[mysql_length] + for idx in reflected_table.indexes + }, + { + "ix_1": {"signal_data": 25}, + "ix_2": {"signal_data": 25, "signal_data_2": 10}, + }, + ) + @testing.requires.mysql_ngram_fulltext def test_reflect_fulltext_comment( self, From 7b2cb47ba16f48462b0e6b2c8f7c24143e9f7e26 Mon Sep 17 00:00:00 2001 From: Michael Gorven Date: Wed, 4 Jan 2023 12:30:42 -0500 Subject: [PATCH 499/632] [asyncpg] Extract rowcount for SELECT statements Added support to the asyncpg dialect to return the ``cursor.rowcount`` value for SELECT statements when available. While this is not a typical use for ``cursor.rowcount``, the other PostgreSQL dialects generally provide this value. Pull request courtesy Michael Gorven. Fixes: #9048 Closes: #9049 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/9049 Pull-request-sha: df16160530c6001d99de059995ad5047a75fb7b0 Change-Id: I095b866779ccea7e4d50bc841fef7605e61c667f (cherry picked from commit 9c502f5788737fa65029716c73fe0f65f3dafb53) --- doc/build/changelog/unreleased_14/9048.rst | 9 +++++++++ lib/sqlalchemy/dialects/postgresql/asyncpg.py | 2 +- test/dialect/postgresql/test_dialect.py | 5 +++++ 3 files changed, 15 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/9048.rst diff --git a/doc/build/changelog/unreleased_14/9048.rst b/doc/build/changelog/unreleased_14/9048.rst new file mode 100644 index 00000000000..cf0c818349e --- /dev/null +++ b/doc/build/changelog/unreleased_14/9048.rst @@ -0,0 +1,9 @@ +.. 
change:: + :tags: bug, postgresql + :tickets: 9048 + :versions: 2.0.0 + + Added support to the asyncpg dialect to return the ``cursor.rowcount`` + value for SELECT statements when available. While this is not a typical use + for ``cursor.rowcount``, the other PostgreSQL dialects generally provide + this value. Pull request courtesy Michael Gorven. diff --git a/lib/sqlalchemy/dialects/postgresql/asyncpg.py b/lib/sqlalchemy/dialects/postgresql/asyncpg.py index f9f7e34b548..daf26a0e509 100644 --- a/lib/sqlalchemy/dialects/postgresql/asyncpg.py +++ b/lib/sqlalchemy/dialects/postgresql/asyncpg.py @@ -443,7 +443,7 @@ async def _prepare_and_execute(self, operation, parameters): status = prepared_stmt.get_statusmsg() reg = re.match( - r"(?:UPDATE|DELETE|INSERT \d+) (\d+)", status + r"(?:SELECT|UPDATE|DELETE|INSERT \d+) (\d+)", status ) if reg: self.rowcount = int(reg.group(1)) diff --git a/test/dialect/postgresql/test_dialect.py b/test/dialect/postgresql/test_dialect.py index fa470a18ce1..f32915cbc3f 100644 --- a/test/dialect/postgresql/test_dialect.py +++ b/test/dialect/postgresql/test_dialect.py @@ -1522,6 +1522,11 @@ def test_initial_transaction_state(self): with engine.connect() as conn: ne_(conn.connection.status, STATUS_IN_TRANSACTION) + def test_select_rowcount(self): + conn = testing.db.connect() + cursor = conn.exec_driver_sql("SELECT 1") + eq_(cursor.rowcount, 1) + class AutocommitTextTest(test_deprecations.AutocommitTextTest): __only_on__ = "postgresql" From 13a5e3ac1ca93e09535f026fc8b9ce36dfbeeb14 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 9 Jan 2023 12:38:10 -0500 Subject: [PATCH 500/632] remove misleading line in post_update this UPDATE refers to the unit of work's need to consider this additional UPDATE as a dependency and not an actual UPDATE statement. 
Change-Id: I2a520af21ebf96b45c431efa898d4e7683a5bc2d References: #9066 References: #1063 (cherry picked from commit 2ef4b449a7709ecab45e926f0e8d0dd398fd12db) --- lib/sqlalchemy/orm/relationships.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/sqlalchemy/orm/relationships.py b/lib/sqlalchemy/orm/relationships.py index c85323a2b49..d19420ab856 100644 --- a/lib/sqlalchemy/orm/relationships.py +++ b/lib/sqlalchemy/orm/relationships.py @@ -758,9 +758,7 @@ class that will be synchronized with this one. It is usually :param post_update: This indicates that the relationship should be handled by a second UPDATE statement after an INSERT or before a - DELETE. Currently, it also will issue an UPDATE after the - instance was UPDATEd as well, although this technically should - be improved. This flag is used to handle saving bi-directional + DELETE. This flag is used to handle saving bi-directional dependencies between two individual rows (i.e. each row references the other), where it would otherwise be impossible to INSERT or DELETE both rows fully since one row exists before the From 2d41f5d8c4de8074648d0fd10f213a44e94319ca Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 10 Jan 2023 09:51:23 -0500 Subject: [PATCH 501/632] fix ORM support for column-named bindparam() in crud .values() Fixed bug / regression where using :func:`.bindparam()` with the same name as a column in the :meth:`.Update.values` method of :class:`.Update`, as well as the :meth:`.Insert.values` method of :class:`.Insert` in 2.0 only, would in some cases silently fail to honor the SQL expression in which the parameter were presented, replacing the expression with a new parameter of the same name and discarding any other elements of the SQL expression, such as SQL functions, etc. 
The specific case would be statements that were constructed against ORM entities rather than plain :class:`.Table` instances, but would occur if the statement were invoked with a :class:`.Session` or a :class:`.Connection`. :class:`.Update` part of the issue was present in both 2.0 and 1.4 and is backported to 1.4. For 1.4, also backports the sqlalchemy.testing.Variation update to the variation() API. Fixes: #9075 Change-Id: Ie954bc1f492ec6a566163588182ef4910c7ee452 (cherry picked from commit b5b864e0fe50243a94c0ef04fddda6fa446c1524) --- doc/build/changelog/unreleased_14/9075.rst | 18 +++ lib/sqlalchemy/sql/crud.py | 7 +- lib/sqlalchemy/testing/__init__.py | 1 + lib/sqlalchemy/testing/config.py | 87 +++++++--- test/orm/test_core_compilation.py | 28 ++++ test/sql/test_compiler.py | 176 +++++++++++++++++++++ test/sql/test_insert.py | 6 +- test/sql/test_update.py | 6 +- 8 files changed, 304 insertions(+), 25 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/9075.rst diff --git a/doc/build/changelog/unreleased_14/9075.rst b/doc/build/changelog/unreleased_14/9075.rst new file mode 100644 index 00000000000..0d96be77088 --- /dev/null +++ b/doc/build/changelog/unreleased_14/9075.rst @@ -0,0 +1,18 @@ +.. change:: + :tags: bug, sql + :tickets: 9075 + :versions: 2.0.0rc3 + + Fixed bug / regression where using :func:`.bindparam()` with the same name + as a column in the :meth:`.Update.values` method of :class:`.Update`, as + well as the :meth:`.Insert.values` method of :class:`.Insert` in 2.0 only, + would in some cases silently fail to honor the SQL expression in which the + parameter were presented, replacing the expression with a new parameter of + the same name and discarding any other elements of the SQL expression, such + as SQL functions, etc. 
The specific case would be statements that were + constructed against ORM entities rather than plain :class:`.Table` + instances, but would occur if the statement were invoked with a + :class:`.Session` or a :class:`.Connection`. + + :class:`.Update` part of the issue was present in both 2.0 and 1.4 and is + backported to 1.4. diff --git a/lib/sqlalchemy/sql/crud.py b/lib/sqlalchemy/sql/crud.py index 48ab7212861..4f509d9a562 100644 --- a/lib/sqlalchemy/sql/crud.py +++ b/lib/sqlalchemy/sql/crud.py @@ -77,14 +77,17 @@ def _get_crud_params(compiler, stmt, compile_state, **kw): if compile_state._has_multi_parameters: spd = compile_state._multi_parameters[0] stmt_parameter_tuples = list(spd.items()) + spd_str_key = {_column_as_key(key) for key in spd} elif compile_state._ordered_values: spd = compile_state._dict_parameters stmt_parameter_tuples = compile_state._ordered_values + spd_str_key = {_column_as_key(key) for key in spd} elif compile_state._dict_parameters: spd = compile_state._dict_parameters stmt_parameter_tuples = list(spd.items()) + spd_str_key = {_column_as_key(key) for key in spd} else: - stmt_parameter_tuples = spd = None + stmt_parameter_tuples = spd = spd_str_key = None # if we have statement parameters - set defaults in the # compiled params @@ -94,7 +97,7 @@ def _get_crud_params(compiler, stmt, compile_state, **kw): parameters = dict( (_column_as_key(key), REQUIRED) for key in compiler.column_keys - if key not in spd + if key not in spd_str_key ) else: parameters = dict( diff --git a/lib/sqlalchemy/testing/__init__.py b/lib/sqlalchemy/testing/__init__.py index bfd8cc3dc36..28bc3c5efd2 100644 --- a/lib/sqlalchemy/testing/__init__.py +++ b/lib/sqlalchemy/testing/__init__.py @@ -50,6 +50,7 @@ from .config import fixture from .config import requirements as requires from .config import skip_test +from .config import Variation from .config import variation from .exclusions import _is_excluded from .exclusions import _server_version diff --git 
a/lib/sqlalchemy/testing/config.py b/lib/sqlalchemy/testing/config.py index e61bf2694a1..ab52d233da9 100644 --- a/lib/sqlalchemy/testing/config.py +++ b/lib/sqlalchemy/testing/config.py @@ -94,21 +94,57 @@ def combinations_list(arg_iterable, **kw): return combinations(*arg_iterable, **kw) -class _variation_base(object): - __slots__ = ("name", "argname") +class Variation(object): + __slots__ = ("_name", "_argname") def __init__(self, case, argname, case_names): - self.name = case - self.argname = argname + self._name = case + self._argname = argname for casename in case_names: setattr(self, casename, casename == case) + @property + def name(self): + return self._name + def __bool__(self): - return self.name == self.argname + return self._name == self._argname def __nonzero__(self): return not self.__bool__() + def __str__(self): + return "%s=%r" % (self._argname, self._name) + + def __repr__(self): + return str(self) + + def fail(self): + # can't import util.fail() under py2.x without resolving + # import cycle + assert False, "Unknown %s" % (self,) + + @classmethod + def idfn(cls, variation): + return variation.name + + @classmethod + def generate_cases(cls, argname, cases): + case_names = [ + argname if c is True else "not_" + argname if c is False else c + for c in cases + ] + + typ = type( + argname, + (Variation,), + { + "__slots__": tuple(case_names), + }, + ) + + return [typ(casename, argname, case_names) for casename in case_names] + def variation(argname, cases): """a helper around testing.combinations that provides a single namespace @@ -138,7 +174,7 @@ class Thing(decl_base): elif querytyp.legacy_query: stmt = Session.query(Thing) else: - assert False + querytyp.fail() The variable provided is a slots object of boolean variables, as well @@ -146,26 +182,35 @@ class Thing(decl_base): """ - case_names = [ - argname if c is True else "not_" + argname if c is False else c - for c in cases + cases_plus_limitations = [ + entry + if (isinstance(entry, tuple) and 
len(entry) == 2) + else (entry, None) + for entry in cases ] - typ = type( - argname, - (_variation_base,), - { - "__slots__": tuple(case_names), - }, + variations = Variation.generate_cases( + argname, [c for c, l in cases_plus_limitations] ) - return combinations( - *[ - (casename, typ(casename, argname, case_names)) - for casename in case_names - ], id_="ia", - argnames=argname + argnames=argname, + *[ + (variation._name, variation, limitation) + if limitation is not None + else (variation._name, variation) + for variation, (case, limitation) in zip( + variations, cases_plus_limitations + ) + ] + ) + + +def variation_fixture(argname, cases, scope="function"): + return fixture( + params=Variation.generate_cases(argname, cases), + ids=Variation.idfn, + scope=scope, ) diff --git a/test/orm/test_core_compilation.py b/test/orm/test_core_compilation.py index 16bdbf2fd4d..c5a76f04f7b 100644 --- a/test/orm/test_core_compilation.py +++ b/test/orm/test_core_compilation.py @@ -40,12 +40,14 @@ from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ +from sqlalchemy.testing import Variation from sqlalchemy.testing.assertions import expect_raises_message from sqlalchemy.testing.fixtures import fixture_session from sqlalchemy.testing.util import resolve_lambda from sqlalchemy.util.langhelpers import hybridproperty from .inheritance import _poly_fixtures from .test_query import QueryTest +from ..sql import test_compiler from ..sql.test_compiler import CorrelateTest as _CoreCorrelateTest # TODO: @@ -2643,3 +2645,29 @@ def c(self): def _fixture(self): t1, t2 = self.classes("T1", "T2") return t1, t2, select(t1).where(t1.c.a == t2.c.a) + + +class CrudParamOverlapTest(test_compiler.CrudParamOverlapTest): + @testing.fixture( + params=Variation.generate_cases("type_", ["orm"]), + ids=["orm"], + ) + def crud_table_fixture(self, request): + type_ = request.param + + if type_.orm: + from sqlalchemy.orm import declarative_base + + Base 
= declarative_base() + + class Foo(Base): + __tablename__ = "mytable" + myid = Column(Integer, primary_key=True) + name = Column(String) + description = Column(String) + + table1 = Foo + else: + type_.fail() + + yield table1 diff --git a/test/sql/test_compiler.py b/test/sql/test_compiler.py index 9ede4af9237..79826d2fb8d 100644 --- a/test/sql/test_compiler.py +++ b/test/sql/test_compiler.py @@ -33,6 +33,7 @@ from sqlalchemy import ForeignKey from sqlalchemy import func from sqlalchemy import Index +from sqlalchemy import insert from sqlalchemy import Integer from sqlalchemy import intersect from sqlalchemy import join @@ -61,6 +62,7 @@ from sqlalchemy import types from sqlalchemy import union from sqlalchemy import union_all +from sqlalchemy import update from sqlalchemy import util from sqlalchemy.dialects import mysql from sqlalchemy.dialects import oracle @@ -97,6 +99,7 @@ from sqlalchemy.testing import is_true from sqlalchemy.testing import mock from sqlalchemy.testing import ne_ +from sqlalchemy.testing import Variation from sqlalchemy.testing.schema import pep435_enum from sqlalchemy.types import UserDefinedType from sqlalchemy.util import u @@ -4907,6 +4910,179 @@ def test_standalone_bindparam_escape_expanding( ) +class CrudParamOverlapTest(AssertsCompiledSQL, fixtures.TestBase): + """tests for #9075. + + we apparently allow same-column-named bindparams in values(), even though + we do *not* allow same-column-named bindparams in other parts of the + statement, but only if the bindparam is associated with that column in the + VALUES / SET clause. If you use a name that matches that of a column in + values() but associate it with a different column, you also get the error. + + This is supported, see + test_insert.py::InsertTest::test_binds_that_match_columns and + test_update.py::UpdateTest::test_binds_that_match_columns. 
The use + case makes sense because the "overlapping binds" issue is that using + a column name in bindparam() will conflict with the bindparam() + that crud.py is going to make for that column in VALUES / SET; but if we + are replacing the actual expression that would be in VALUES / SET, then + it's fine, there is no conflict. + + The test suite is extended in + test/orm/test_core_compilation.py with ORM mappings that caused + the failure that was fixed by #9075. + + + """ + + __dialect__ = "default" + + @testing.fixture( + params=Variation.generate_cases("type_", ["lowercase", "uppercase"]), + ids=["lowercase", "uppercase"], + ) + def crud_table_fixture(self, request): + type_ = request.param + + if type_.lowercase: + table1 = table( + "mytable", + column("myid", Integer), + column("name", String), + column("description", String), + ) + elif type_.uppercase: + table1 = Table( + "mytable", + MetaData(), + Column("myid", Integer), + Column("name", String), + Column("description", String), + ) + else: + type_.fail() + + yield table1 + + def test_same_named_binds_insert_values(self, crud_table_fixture): + table1 = crud_table_fixture + stmt = insert(table1).values( + myid=bindparam("myid"), + description=func.coalesce(bindparam("description"), "default"), + ) + self.assert_compile( + stmt, + "INSERT INTO mytable (myid, description) VALUES " + "(:myid, coalesce(:description, :coalesce_1))", + ) + + self.assert_compile( + stmt, + "INSERT INTO mytable (myid, description) VALUES " + "(:myid, coalesce(:description, :coalesce_1))", + params={"myid": 5, "description": "foo"}, + checkparams={ + "coalesce_1": "default", + "description": "foo", + "myid": 5, + }, + ) + + self.assert_compile( + stmt, + "INSERT INTO mytable (myid, name, description) VALUES " + "(:myid, :name, coalesce(:description, :coalesce_1))", + params={"myid": 5, "description": "foo", "name": "bar"}, + checkparams={ + "coalesce_1": "default", + "description": "foo", + "myid": 5, + "name": "bar", + }, + ) + + 
def test_same_named_binds_update_values(self, crud_table_fixture): + table1 = crud_table_fixture + stmt = update(table1).values( + myid=bindparam("myid"), + description=func.coalesce(bindparam("description"), "default"), + ) + self.assert_compile( + stmt, + "UPDATE mytable SET myid=:myid, " + "description=coalesce(:description, :coalesce_1)", + ) + + self.assert_compile( + stmt, + "UPDATE mytable SET myid=:myid, " + "description=coalesce(:description, :coalesce_1)", + params={"myid": 5, "description": "foo"}, + checkparams={ + "coalesce_1": "default", + "description": "foo", + "myid": 5, + }, + ) + + self.assert_compile( + stmt, + "UPDATE mytable SET myid=:myid, name=:name, " + "description=coalesce(:description, :coalesce_1)", + params={"myid": 5, "description": "foo", "name": "bar"}, + checkparams={ + "coalesce_1": "default", + "description": "foo", + "myid": 5, + "name": "bar", + }, + ) + + def test_different_named_binds_insert_values(self, crud_table_fixture): + table1 = crud_table_fixture + stmt = insert(table1).values( + myid=bindparam("myid"), + name=func.coalesce(bindparam("description"), "default"), + ) + self.assert_compile( + stmt, + "INSERT INTO mytable (myid, name) VALUES " + "(:myid, coalesce(:description, :coalesce_1))", + ) + + with expect_raises_message( + exc.CompileError, r"bindparam\(\) name 'description' is reserved " + ): + stmt.compile(column_keys=["myid", "description"]) + + with expect_raises_message( + exc.CompileError, r"bindparam\(\) name 'description' is reserved " + ): + stmt.compile(column_keys=["myid", "description", "name"]) + + def test_different_named_binds_update_values(self, crud_table_fixture): + table1 = crud_table_fixture + stmt = update(table1).values( + myid=bindparam("myid"), + name=func.coalesce(bindparam("description"), "default"), + ) + self.assert_compile( + stmt, + "UPDATE mytable SET myid=:myid, " + "name=coalesce(:description, :coalesce_1)", + ) + + with expect_raises_message( + exc.CompileError, r"bindparam\(\) 
name 'description' is reserved " + ): + stmt.compile(column_keys=["myid", "description"]) + + with expect_raises_message( + exc.CompileError, r"bindparam\(\) name 'description' is reserved " + ): + stmt.compile(column_keys=["myid", "description", "name"]) + + class UnsupportedTest(fixtures.TestBase): def test_unsupported_element_str_visit_name(self): from sqlalchemy.sql.expression import ClauseElement diff --git a/test/sql/test_insert.py b/test/sql/test_insert.py index 741859fb2cf..c052ac5da43 100644 --- a/test/sql/test_insert.py +++ b/test/sql/test_insert.py @@ -68,7 +68,11 @@ class InsertTest(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL): def test_binds_that_match_columns(self): """test bind params named after column names - replace the normal SET/VALUES generation.""" + replace the normal SET/VALUES generation. + + See also test_compiler.py::CrudParamOverlapTest + + """ t = table("foo", column("x"), column("y")) diff --git a/test/sql/test_update.py b/test/sql/test_update.py index 93deae5565e..214fb913fa6 100644 --- a/test/sql/test_update.py +++ b/test/sql/test_update.py @@ -316,7 +316,11 @@ def test_correlated_update_seven(self): def test_binds_that_match_columns(self): """test bind params named after column names - replace the normal SET/VALUES generation.""" + replace the normal SET/VALUES generation. 
+ + See also test_compiler.py::CrudParamOverlapTest + + """ t = table("foo", column("x"), column("y")) From 1527720f2dfbb8fec74917dc7930cea78a35429e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 12 Jan 2023 11:25:39 -0500 Subject: [PATCH 502/632] add with_loader_criteria() test for #8064 / #9091 test related to #8064, added after discussion #9091 which requested this behavior for with_loader_criteria() where it was found to be working as of this issue, just not tested Change-Id: I703f25af3708e49380b6004badd3a8ffb783ef70 References: #8064 References: #9091 (cherry picked from commit a2bf6f1bb4a56dfe6c6d1fe22c3a4e84f5da99bc) --- test/orm/test_relationship_criteria.py | 62 ++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/test/orm/test_relationship_criteria.py b/test/orm/test_relationship_criteria.py index ed4ab32950a..e866fe01862 100644 --- a/test/orm/test_relationship_criteria.py +++ b/test/orm/test_relationship_criteria.py @@ -15,6 +15,7 @@ from sqlalchemy import String from sqlalchemy import testing from sqlalchemy.orm import aliased +from sqlalchemy.orm import column_property from sqlalchemy.orm import defer from sqlalchemy.orm import join as orm_join from sqlalchemy.orm import joinedload @@ -57,6 +58,35 @@ def user_address_fixture(self): ) return User, Address + @testing.fixture + def user_address_col_property_fixture(self): + users, Address, addresses, User = ( + self.tables.users, + self.classes.Address, + self.tables.addresses, + self.classes.User, + ) + + self.mapper_registry.map_imperatively(Address, addresses) + + self.mapper_registry.map_imperatively( + User, + users, + properties={ + "addresses": relationship( + Address, + order_by=Address.id, + ), + "num_addresses": column_property( + select(func.count(Address.id)) + .where(Address.user_id == users.c.id) + .correlate_except(Address) + .scalar_subquery() + ), + }, + ) + return User, Address + @testing.fixture def user_address_custom_strat_fixture(self): users, Address, 
addresses, User = ( @@ -454,6 +484,38 @@ def test_criteria_post_replace_legacy(self, user_address_fixture): "WHERE users.name != :name_1", ) + def test_criteria_applies_to_column_property( + self, user_address_col_property_fixture + ): + """test related to #8064, added after discussion #9091 which + requested this behavior for with_loader_criteria() where it was + found to be working as of this issue, just not tested""" + + User, Address = user_address_col_property_fixture + + stmt = select(User) + + self.assert_compile( + stmt, + "SELECT (SELECT count(addresses.id) AS count_1 FROM addresses " + "WHERE addresses.user_id = users.id) AS anon_1, " + "users.id, users.name FROM users", + ) + + stmt = select(User).options( + with_loader_criteria( + Address, Address.email_address != "email_address" + ) + ) + + self.assert_compile( + stmt, + "SELECT (SELECT count(addresses.id) AS count_1 FROM addresses " + "WHERE addresses.user_id = users.id AND " + "addresses.email_address != :email_address_1) AS anon_1, " + "users.id, users.name FROM users", + ) + def test_select_from_mapper_mapper_criteria(self, user_address_fixture): User, Address = user_address_fixture From ae57d5b7b4ee0d5bf3b15f91f86236eb16120b0f Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Wed, 11 Jan 2023 20:33:16 +0100 Subject: [PATCH 503/632] Move docs in mysql dbapi Fixes: #9084 Change-Id: I5e174c318a20b7fcb5ea7c771293c5102e761ed7 (cherry picked from commit a06cd94baab114cd0b2fa0987267e31811d38f7a) --- lib/sqlalchemy/dialects/mysql/mysqldb.py | 13 ------------- lib/sqlalchemy/dialects/mysql/pymysql.py | 14 +++++++++++++- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/lib/sqlalchemy/dialects/mysql/mysqldb.py b/lib/sqlalchemy/dialects/mysql/mysqldb.py index 9e31f4ba2af..7eef5185499 100644 --- a/lib/sqlalchemy/dialects/mysql/mysqldb.py +++ b/lib/sqlalchemy/dialects/mysql/mysqldb.py @@ -63,19 +63,6 @@ "&ssl_key=/home/gord/client-ssl/client-key.pem" ) -If the server uses an 
automatically-generated certificate that is self-signed -or does not match the host name (as seen from the client), it may also be -necessary to indicate ``ssl_check_hostname=false``:: - - connection_uri = ( - "mysql+pymysql://scott:tiger@192.168.0.134/test" - "?ssl_ca=/home/gord/client-ssl/ca.pem" - "&ssl_cert=/home/gord/client-ssl/client-cert.pem" - "&ssl_key=/home/gord/client-ssl/client-key.pem" - "&ssl_check_hostname=false" - ) - - .. seealso:: :ref:`pymysql_ssl` in the PyMySQL dialect diff --git a/lib/sqlalchemy/dialects/mysql/pymysql.py b/lib/sqlalchemy/dialects/mysql/pymysql.py index 5e234fbec1f..3a776f8775e 100644 --- a/lib/sqlalchemy/dialects/mysql/pymysql.py +++ b/lib/sqlalchemy/dialects/mysql/pymysql.py @@ -25,7 +25,19 @@ ------------------ The PyMySQL DBAPI accepts the same SSL arguments as that of MySQLdb, -described at :ref:`mysqldb_ssl`. See that section for examples. +described at :ref:`mysqldb_ssl`. See that section for additional examples. + +If the server uses an automatically-generated certificate that is self-signed +or does not match the host name (as seen from the client), it may also be +necessary to indicate ``ssl_check_hostname=false`` in PyMySQL:: + + connection_uri = ( + "mysql+pymysql://scott:tiger@192.168.0.134/test" + "?ssl_ca=/home/gord/client-ssl/ca.pem" + "&ssl_cert=/home/gord/client-ssl/client-cert.pem" + "&ssl_key=/home/gord/client-ssl/client-key.pem" + "&ssl_check_hostname=false" + ) MySQL-Python Compatibility From 8fe21a435e47a62042c31ed43dcf738d76d397bd Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Thu, 12 Jan 2023 23:08:33 +0100 Subject: [PATCH 504/632] Remove missing doc section Change-Id: Ic6dda7f32a7561a0c0a92b8a7c08e44cb174eec1 --- doc/build/core/operators.rst | 8 -------- 1 file changed, 8 deletions(-) diff --git a/doc/build/core/operators.rst b/doc/build/core/operators.rst index 10b6db33805..d3da3c60821 100644 --- a/doc/build/core/operators.rst +++ b/doc/build/core/operators.rst @@ -671,14 +671,6 @@ The above 
conjunction functions :func:`_sql.and_`, :func:`_sql.or_`, .. - - -Operator Customization -^^^^^^^^^^^^^^^^^^^^^^ - -TODO - - .. Setup code, not for display >>> conn.close() From 04b868e2782601af1d7118a6242abfc6c130cc23 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 15 Jan 2023 11:28:22 -0500 Subject: [PATCH 505/632] fix orm-header for standalone Change-Id: I20e7f8ad5800c2ce5cdc9645c4dbe66f53b8cdf8 (cherry picked from commit 48eed9eb9c6ba853b48e41ef6038ec97c5a3fb68) --- doc/build/tutorial/dbapi_transactions.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/tutorial/dbapi_transactions.rst b/doc/build/tutorial/dbapi_transactions.rst index 634b1f6cdcd..4eeee79e592 100644 --- a/doc/build/tutorial/dbapi_transactions.rst +++ b/doc/build/tutorial/dbapi_transactions.rst @@ -396,7 +396,7 @@ for this use case. generally used for updating or deleting many individual rows separately. -.. rst-class:: orm-addin +.. rst-class:: orm-header .. _tutorial_executing_orm_session: From 1762c40490182fc511b71f5044f7252e2937162b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 16 Jan 2023 16:02:07 -0500 Subject: [PATCH 506/632] apply changelog note for change of column loader options Change-Id: Ib9a69e20420e1fda755f4533c5f90bc08ba11b48 References: #8879 References: #9101 (cherry picked from commit 5e6cb26a017fb72bc3f925194c6f8c1eda968fbe) --- doc/build/changelog/migration_14.rst | 169 +++++++++++++++++++++++++ doc/build/orm/mapped_sql_expr.rst | 82 +++++++++++- lib/sqlalchemy/orm/strategy_options.py | 9 +- 3 files changed, 258 insertions(+), 2 deletions(-) diff --git a/doc/build/changelog/migration_14.rst b/doc/build/changelog/migration_14.rst index 089715bf6dd..85c8c1d3f30 100644 --- a/doc/build/changelog/migration_14.rst +++ b/doc/build/changelog/migration_14.rst @@ -2280,6 +2280,175 @@ to be more noticeable. :ticket:`1763` +.. 
_change_8879: + +Column loaders such as ``deferred()``, ``with_expression()`` only take effect when indicated on the outermost, full entity query +-------------------------------------------------------------------------------------------------------------------------------- + +.. note:: This change note was not present in earlier versions of this document, + however is relevant for all SQLAlchemy 1.4 versions. + +A behavior that was never supported in 1.3 and previous versions +yet nonetheless would have a particular effect +was to repurpose column loader options such as :func:`_orm.defer` and +:func:`_orm.with_expression` in subqueries in order to control which +SQL expressions would be in the columns clause of each subquery. A typical +example would be to +construct UNION queries, such as:: + + q1 = session.query(User).options(with_expression(User.expr, literal("u1"))) + q2 = session.query(User).options(with_expression(User.expr, literal("u2"))) + + q1.union_all(q2).all() + +In version 1.3, the :func:`_orm.with_expression` option would take effect +for each element of the UNION, such as: + +.. sourcecode:: sql + + SELECT anon_1.anon_2 AS anon_1_anon_2, anon_1.user_account_id AS anon_1_user_account_id, + anon_1.user_account_name AS anon_1_user_account_name + FROM ( + SELECT ? AS anon_2, user_account.id AS user_account_id, user_account.name AS user_account_name + FROM user_account + UNION ALL + SELECT ? AS anon_3, user_account.id AS user_account_id, user_account.name AS user_account_name + FROM user_account + ) AS anon_1 + ('u1', 'u2') + +SQLAlchemy 1.4's notion of loader options has been made more strict, and as such +are applied to the **outermost part of the query only**, which is the +SELECT that is intended to populate the actual ORM entities to be returned; the +query above in 1.4 will produce: + +.. sourcecode:: sql + + SELECT ? 
AS anon_1, anon_2.user_account_id AS anon_2_user_account_id, + anon_2.user_account_name AS anon_2_user_account_name + FROM ( + SELECT user_account.id AS user_account_id, user_account.name AS user_account_name + FROM user_account + UNION ALL + SELECT user_account.id AS user_account_id, user_account.name AS user_account_name + FROM user_account + ) AS anon_2 + ('u1',) + +that is, the options for the :class:`_orm.Query` were taken from the first +element of the UNION, since all loader options are only to be at the topmost +level. The option from the second query was ignored. + +Rationale +^^^^^^^^^ + +This behavior now more closely matches that of other kinds of loader options +such as relationship loader options like :func:`_orm.joinedload` in all +SQLAlchemy versions, 1.3 and earlier included, which in a UNION situation were +already copied out to the top most level of the query, and only taken from the +first element of the UNION, discarding any options on other parts of the query. + +This implicit copying and selective ignoring of options, demonstrated above as +being fairly arbitrary, is a legacy behavior that's only part of +:class:`_orm.Query`, and is a particular example of where :class:`_orm.Query` +and its means of applying :meth:`_orm.Query.union_all` falls short, as it's +ambiguous how to turn a single SELECT into a UNION of itself and another query +and how loader options should be applied to that new statement. + +SQLAlchemy 1.4's behavior can be demonstrated as generally superior to that +of 1.3 for a more common case of using :func:`_orm.defer`. The following +query:: + + q1 = session.query(User).options(defer(User.name)) + q2 = session.query(User).options(defer(User.name)) + + q1.union_all(q2).all() + +In 1.3 would awkwardly add NULL to the inner queries and then SELECT it: + +.. 
sourcecode:: sql + + SELECT anon_1.anon_2 AS anon_1_anon_2, anon_1.user_account_id AS anon_1_user_account_id + FROM ( + SELECT NULL AS anon_2, user_account.id AS user_account_id + FROM user_account + UNION ALL + SELECT NULL AS anon_2, user_account.id AS user_account_id + FROM user_account + ) AS anon_1 + +If all queries didn't have the identical options set up, the above scenario +would raise an error due to not being able to form a proper UNION. + +Whereas in 1.4, the option is applied only at the top layer, omitting +the fetch for ``User.name``, and this complexity is avoided: + +.. sourcecode:: sql + + SELECT anon_1.user_account_id AS anon_1_user_account_id + FROM ( + SELECT user_account.id AS user_account_id, user_account.name AS user_account_name + FROM user_account + UNION ALL + SELECT user_account.id AS user_account_id, user_account.name AS user_account_name + FROM user_account + ) AS anon_1 + +Correct Approach +^^^^^^^^^^^^^^^^ + +Using :term:`2.0-style` querying, no warning is emitted at the moment, however +the nested :func:`_orm.with_expression` options are consistently ignored as +they don't apply to an entity being loaded, and are not implicitly copied +anywhere. The query below produces no output for the +:func:`_orm.with_expression` calls:: + + s1 = select(User).options(with_expression(User.expr, literal("u1"))) + s2 = select(User).options(with_expression(User.expr, literal("u2"))) + + stmt = union_all(s1, s2) + + session.scalars(select(User).from_statement(stmt)).all() + +producing the SQL: + +.. 
sourcecode:: sql + + SELECT user_account.id, user_account.name + FROM user_account + UNION ALL + SELECT user_account.id, user_account.name + FROM user_account + +To correctly apply :func:`_orm.with_expression` to the ``User`` entity, +it should be applied to the outermost level of the query, using an +ordinary SQL expression inside the columns clause of each SELECT:: + + s1 = select(User, literal("u1").label("some_literal")) + s2 = select(User, literal("u2").label("some_literal")) + + stmt = union_all(s1, s2) + + session.scalars( + select(User) + .from_statement(stmt) + .options(with_expression(User.expr, stmt.selected_columns.some_literal)) + ).all() + +Which will produce the expected SQL: + +.. sourcecode:: sql + + SELECT user_account.id, user_account.name, ? AS some_literal + FROM user_account + UNION ALL + SELECT user_account.id, user_account.name, ? AS some_literal + FROM user_account + +The ``User`` objects themselves will include this expression in their +contents underneath ``User.expr``. + + .. _change_4519: Accessing an uninitialized collection attribute on a transient object no longer mutates __dict__ diff --git a/doc/build/orm/mapped_sql_expr.rst b/doc/build/orm/mapped_sql_expr.rst index 47af9b22c4c..0e93e5e920c 100644 --- a/doc/build/orm/mapped_sql_expr.rst +++ b/doc/build/orm/mapped_sql_expr.rst @@ -365,7 +365,17 @@ The :func:`.query_expression` mapping has these caveats: a new :func:`.with_expression` directive will the attribute be set to a non-None value. -* The mapped attribute currently **cannot** be applied to other parts of the +* :func:`_orm.with_expression`, as an object loading option, only takes effect + on the **outermost part + of a query** and only for a query against a full entity, and not for arbitrary + column selects, within subqueries, or the elements of a compound + statement such as a UNION. See the next + section :ref:`mapper_querytime_expression_unions` for an example. + + .. versionchanged:: 1.4 This is new as of version 1.4. 
See the change notes + at :ref:`change_8879` for background. + +* The mapped attribute **cannot** be applied to other parts of the query, such as the WHERE clause, the ORDER BY clause, and make use of the ad-hoc expression; that is, this won't work:: @@ -391,3 +401,73 @@ The :func:`.query_expression` mapping has these caveats: .. versionadded:: 1.2 + +.. _mapper_querytime_expression_unions: + + +Using ``with_expression()`` with UNIONs, other subqueries +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The :func:`_orm.with_expression` construct is an ORM loader option, and as +such may only be applied to the outermost level of a SELECT statement which +is to load a particular ORM entity. It does not have any effect if used +inside of a :func:`_sql.select` that will then be used as a subquery or +as an element within a compound statement such as a UNION. + +.. versionchanged:: 1.4 The behavior of column loader options applying + only at the outermost layer of an ORM SELECT statement was previously + not applied consistently; in 1.4 it applies to all loader options + for both columns as well as relationships. Background on this change + is at :ref:`change_8879`. + +In order to use arbitrary SQL expressions in subqueries, normal Core-style +means of adding expressions should be used. To assemble a subquery-derived +expression onto the ORM entity's :func:`_orm.query_expression` attributes, +:func:`_orm.with_expression` is used at the top layer of ORM object loading, +referencing the SQL expression within the subquery. + +.. note:: + + The example below uses :term:`2.0 style` queries in order to demonstrate a + UNION. ORM UNIONs may be assembled without ambiguity using this style + of query. + +In the example below, two :func:`_sql.select` constructs are used against +the ORM entity ``A`` with an additional SQL expression labeled in +``expr``, and combined using :func:`_sql.union_all`. 
Then, at the topmost +layer, the ``A`` entity is SELECTed from this UNION, using the +querying technique described at :ref:`orm_queryguide_unions`, adding an +option with :func:`_orm.with_expression` to extract this SQL expression +onto newly loaded instances of ``A``: + +.. sourcecode:: pycon+sql + + >>> from sqlalchemy import union_all + >>> s1 = ( + ... select(User, func.count(Book.id).label("book_count")) + ... .join_from(User, Book) + ... .where(User.name == "spongebob") + ... ) + >>> s2 = ( + ... select(User, func.count(Book.id).label("book_count")) + ... .join_from(User, Book) + ... .where(User.name == "sandy") + ... ) + >>> union_stmt = union_all(s1, s2) + >>> orm_stmt = ( + ... select(User) + ... .from_statement(union_stmt) + ... .options(with_expression(User.book_count, union_stmt.c.book_count)) + ... ) + >>> for user in session.scalars(orm_stmt): + ... print(f"Username: {user.name} Number of books: {user.book_count}") + {execsql}SELECT user_account.id, user_account.name, user_account.fullname, count(book.id) AS book_count + FROM user_account JOIN book ON user_account.id = book.owner_id + WHERE user_account.name = ? + UNION ALL + SELECT user_account.id, user_account.name, user_account.fullname, count(book.id) AS book_count + FROM user_account JOIN book ON user_account.id = book.owner_id + WHERE user_account.name = ? + [...] ('spongebob', 'sandy'){stop} + Username: spongebob Number of books: 3 + Username: sandy Number of books: 3 diff --git a/lib/sqlalchemy/orm/strategy_options.py b/lib/sqlalchemy/orm/strategy_options.py index a4fe6f90f83..170847f42e8 100644 --- a/lib/sqlalchemy/orm/strategy_options.py +++ b/lib/sqlalchemy/orm/strategy_options.py @@ -1955,10 +1955,17 @@ def with_expression(loadopt, key, expression): .. versionadded:: 1.2 - :param key: Attribute to be undeferred. + :param key: Attribute to be populated. :param expr: SQL expression to be applied to the attribute. + .. 
versionchanged:: 1.4 Loader options such as + :func:`_orm.with_expression` + take effect only at the **outermost** query used, and should not be used + within subqueries or inner elements of a UNION. See the change notes at + :ref:`change_8879` for background on how to correctly add arbitrary + columns to subqueries. + .. note:: the target attribute is populated only if the target object is **not currently loaded** in the current :class:`_orm.Session` unless the :meth:`_query.Query.populate_existing` method is used. From 2a53f70eeed0c39ff13e0c57086443e8714c8142 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 16 Jan 2023 20:17:50 -0500 Subject: [PATCH 507/632] mypy plugin fixes Adjustments made to the mypy plugin to accommodate for some potential changes being made for issue #236 sqlalchemy2-stubs when using SQLAlchemy 1.4. These changes are being kept in sync within SQLAlchemy 2.0. The changes are also backwards compatible with older versions of sqlalchemy2-stubs. Fixed crash in mypy plugin which could occur on both 1.4 and 2.0 versions if a decorator for the :func:`_orm.registry.mapped` decorator were used that was referenced in an expression with more than two components (e.g. ``@Backend.mapper_registry.mapped``). This scenario is now ignored; when using the plugin, the decorator expression needs to be two components (i.e. ``@reg.mapped``). 
References: https://github.com/sqlalchemy/sqlalchemy2-stubs/issues/236 Fixes: #9102 Change-Id: Ieb1bf7bf8184645bcd43253e57f1c267b2640537 (cherry picked from commit cf64582f61b15716228302f669322d7efa1003c1) (cherry picked from commit 36285760238314f70eed4532ca2c2c0c2d684609) --- .../changelog/unreleased_14/mypy_fix.rst | 22 +++++++++++++++++++ lib/sqlalchemy/ext/mypy/apply.py | 22 ++++++++++++++++--- lib/sqlalchemy/ext/mypy/plugin.py | 11 ++++++---- test/ext/mypy/files/issue_9102.py | 18 +++++++++++++++ test/ext/mypy/files/issue_9102_workaround.py | 19 ++++++++++++++++ test/ext/mypy/test_mypy_plugin_py3k.py | 4 ++++ 6 files changed, 89 insertions(+), 7 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/mypy_fix.rst create mode 100644 test/ext/mypy/files/issue_9102.py create mode 100644 test/ext/mypy/files/issue_9102_workaround.py diff --git a/doc/build/changelog/unreleased_14/mypy_fix.rst b/doc/build/changelog/unreleased_14/mypy_fix.rst new file mode 100644 index 00000000000..d383c772876 --- /dev/null +++ b/doc/build/changelog/unreleased_14/mypy_fix.rst @@ -0,0 +1,22 @@ +.. change:: + :tags: bug, mypy + :versions: 2.0.0rc3 + + Adjustments made to the mypy plugin to accommodate for some potential + changes being made for issue #236 sqlalchemy2-stubs when using SQLAlchemy + 1.4. These changes are being kept in sync within SQLAlchemy 2.0. + The changes are also backwards compatible with older versions of + sqlalchemy2-stubs. + + +.. change:: + :tags: bug, mypy + :tickets: 9102 + :versions: 2.0.0rc3 + + Fixed crash in mypy plugin which could occur on both 1.4 and 2.0 versions + if a decorator for the :func:`_orm.registry.mapped` decorator were used + that was referenced in an expression with more than two components (e.g. + ``@Backend.mapper_registry.mapped``). This scenario is now ignored; when + using the plugin, the decorator expression needs to be two components (i.e. + ``@reg.mapped``). 
diff --git a/lib/sqlalchemy/ext/mypy/apply.py b/lib/sqlalchemy/ext/mypy/apply.py index 99be194cdcc..ad81c15b1d8 100644 --- a/lib/sqlalchemy/ext/mypy/apply.py +++ b/lib/sqlalchemy/ext/mypy/apply.py @@ -164,7 +164,10 @@ def re_apply_declarative_assignments( update_cls_metadata = True - if python_type_for_type is not None: + if python_type_for_type is not None and ( + not isinstance(left_node.type, Instance) + or left_node.type.type.fullname != NAMED_TYPE_SQLA_MAPPED + ): left_node.type = api.named_type( NAMED_TYPE_SQLA_MAPPED, [python_type_for_type] ) @@ -201,15 +204,23 @@ class User(Base): left_node = lvalue.node assert isinstance(left_node, Var) + # to be completely honest I have no idea what the difference between + # left_node.type and stmt.type is, what it means if these are different + # vs. the same, why in order to get tests to pass I have to assign + # to stmt.type for the second case and not the first. this is complete + # trying every combination until it works stuff. + if left_hand_explicit_type is not None: left_node.type = api.named_type( NAMED_TYPE_SQLA_MAPPED, [left_hand_explicit_type] ) else: lvalue.is_inferred_def = False - left_node.type = api.named_type( + left_node.type = stmt.type = api.named_type( NAMED_TYPE_SQLA_MAPPED, - [] if python_type_for_type is None else [python_type_for_type], + [AnyType(TypeOfAny.special_form)] + if python_type_for_type is None + else [python_type_for_type], ) # so to have it skip the right side totally, we can do this: @@ -226,6 +237,11 @@ class User(Base): # internally stmt.rvalue = util.expr_to_mapped_constructor(stmt.rvalue) + if stmt.type is None or python_type_for_type is None: + stmt.type = api.named_type( + NAMED_TYPE_SQLA_MAPPED, [AnyType(TypeOfAny.special_form)] + ) + def add_additional_orm_attributes( cls: ClassDef, diff --git a/lib/sqlalchemy/ext/mypy/plugin.py b/lib/sqlalchemy/ext/mypy/plugin.py index 8687012a1e4..bd2dd79d62a 100644 --- a/lib/sqlalchemy/ext/mypy/plugin.py +++ 
b/lib/sqlalchemy/ext/mypy/plugin.py @@ -184,10 +184,13 @@ def _fill_in_decorators(ctx: ClassDefContext) -> None: else: continue - assert isinstance(target.expr, NameExpr) - sym = ctx.api.lookup_qualified( - target.expr.name, target, suppress_errors=True - ) + if isinstance(target.expr, NameExpr): + sym = ctx.api.lookup_qualified( + target.expr.name, target, suppress_errors=True + ) + else: + continue + if sym and sym.node: sym_type = get_proper_type(sym.type) if isinstance(sym_type, Instance): diff --git a/test/ext/mypy/files/issue_9102.py b/test/ext/mypy/files/issue_9102.py new file mode 100644 index 00000000000..a9eea7c606b --- /dev/null +++ b/test/ext/mypy/files/issue_9102.py @@ -0,0 +1,18 @@ +from sqlalchemy import Column +from sqlalchemy import Integer +from sqlalchemy.orm import registry + + +class BackendMeta: + __abstract__ = True + mapped_registry: registry = registry() + metadata = mapped_registry.metadata + + +# this decorator is not picked up now, but at least it doesn't crash +@BackendMeta.mapped_registry.mapped +class User: + __tablename__ = "user" + + # EXPECTED_MYPY: Incompatible types in assignment (expression has type "Column[Integer]", variable has type "int") # noqa: E501 + id: int = Column(Integer(), primary_key=True) diff --git a/test/ext/mypy/files/issue_9102_workaround.py b/test/ext/mypy/files/issue_9102_workaround.py new file mode 100644 index 00000000000..3682d29b237 --- /dev/null +++ b/test/ext/mypy/files/issue_9102_workaround.py @@ -0,0 +1,19 @@ +from sqlalchemy import Column +from sqlalchemy import Integer +from sqlalchemy.orm import registry + + +class BackendMeta: + __abstract__ = True + mapped_registry: registry = registry() + metadata = mapped_registry.metadata + + +reg: registry = BackendMeta.mapped_registry + + +@reg.mapped +class User: + __tablename__ = "user" + + id: int = Column(Integer(), primary_key=True) diff --git a/test/ext/mypy/test_mypy_plugin_py3k.py b/test/ext/mypy/test_mypy_plugin_py3k.py index 
3df758c56db..cb04d1c739a 100644 --- a/test/ext/mypy/test_mypy_plugin_py3k.py +++ b/test/ext/mypy/test_mypy_plugin_py3k.py @@ -76,6 +76,10 @@ def run(path, use_plugin=True, incremental=False): shutil.copyfile(path, test_program) args.append(test_program) + # I set this locally but for the suite here needs to be + # disabled + os.environ.pop("MYPY_FORCE_COLOR", None) + result = api.run(args) return result From 102114accc5a7caf76629c4c2ab108ddbfcbe81a Mon Sep 17 00:00:00 2001 From: Shan Date: Sun, 22 Jan 2023 11:19:11 -0500 Subject: [PATCH 508/632] Run bracket interpretation for reflection Fixed bug where a schema name given with brackets, but no dots inside the name, for parameters such as :paramref:`_schema.Table.schema` would not be interpreted within the context of the SQL Server dialect's documented behavior of interpreting explicit brackets as token delimiters, first added in 1.2 for #2626, when referring to the schema name in reflection operations. The original assumption for #2626's behavior was that the special interpretation of brackets was only significant if dots were present, however in practice, the brackets are not included as part of the identifier name for all SQL rendering operations since these are not valid characters within regular or delimited identifiers. Pull request courtesy Shan. 
Fixes: #9133 Closes: #9134 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/9134 Pull-request-sha: 5dac87c82cd3063dd8e50f0075c7c00330be6439 Change-Id: I7a507bc38d75a04ffcb7e920298775baae22c6d1 (cherry picked from commit aa50375a9aa72be896a7cf3afbbbec161c7111bd) --- doc/build/changelog/unreleased_14/9133.rst | 16 +++++++++ lib/sqlalchemy/dialects/mssql/base.py | 4 +-- test/dialect/mssql/test_compiler.py | 41 ++++++++++++++++++++++ test/dialect/mssql/test_reflection.py | 29 +++++++++++++++ 4 files changed, 87 insertions(+), 3 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/9133.rst diff --git a/doc/build/changelog/unreleased_14/9133.rst b/doc/build/changelog/unreleased_14/9133.rst new file mode 100644 index 00000000000..29e05f5fe9d --- /dev/null +++ b/doc/build/changelog/unreleased_14/9133.rst @@ -0,0 +1,16 @@ +.. change:: + :tags: bug, mssql + :tickets: 9133 + + Fixed bug where a schema name given with brackets, but no dots inside the + name, for parameters such as :paramref:`_schema.Table.schema` would not be + interpreted within the context of the SQL Server dialect's documented + behavior of interpreting explicit brackets as token delimiters, first added + in 1.2 for #2626, when referring to the schema name in reflection + operations. The original assumption for #2626's behavior was that the + special interpretation of brackets was only significant if dots were + present, however in practice, the brackets are not included as part of the + identifier name for all SQL rendering operations since these are not valid + characters within regular or delimited identifiers. Pull request courtesy + Shan. 
+ diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index 948d3afb063..db741d84aaf 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -2664,10 +2664,8 @@ def _switch_db(dbname, connection, fn, *arg, **kw): def _owner_plus_db(dialect, schema): if not schema: return None, dialect.default_schema_name - elif "." in schema: - return _schema_elements(schema) else: - return None, schema + return _schema_elements(schema) _memoized_schema = util.LRUCache() diff --git a/test/dialect/mssql/test_compiler.py b/test/dialect/mssql/test_compiler.py index d54295b3062..a385ad12083 100644 --- a/test/dialect/mssql/test_compiler.py +++ b/test/dialect/mssql/test_compiler.py @@ -585,6 +585,47 @@ def test_noorderby_parameters_insubquery(self): checkpositional=("bar",), ) + @testing.variation("use_schema_translate", [True, False]) + @testing.combinations( + "abc", "has spaces", "[abc]", "[has spaces]", argnames="schemaname" + ) + def test_schema_single_token_bracketed( + self, use_schema_translate, schemaname + ): + """test for #9133. + + this is not the actual regression case for #9133, which is instead + within the reflection process. However, when we implemented + #2626, we never considered the case of ``[schema]`` without any + dots in it. 
+ + """ + + schema_no_brackets = schemaname.strip("[]") + + if " " in schemaname: + rendered_schema = "[%s]" % (schema_no_brackets,) + else: + rendered_schema = schema_no_brackets + + metadata = MetaData() + tbl = Table( + "test", + metadata, + Column("id", Integer, primary_key=True), + schema=schemaname if not use_schema_translate else None, + ) + + self.assert_compile( + select(tbl), + "SELECT %(name)s.test.id FROM %(name)s.test" + % {"name": rendered_schema}, + schema_translate_map={None: schemaname} + if use_schema_translate + else None, + render_schema_translate=True if use_schema_translate else False, + ) + def test_schema_many_tokens_one(self): metadata = MetaData() tbl = Table( diff --git a/test/dialect/mssql/test_reflection.py b/test/dialect/mssql/test_reflection.py index 4c5a5398164..d24ee4adb9a 100644 --- a/test/dialect/mssql/test_reflection.py +++ b/test/dialect/mssql/test_reflection.py @@ -411,6 +411,35 @@ def test_global_temp_different_collation( ) Table(tname, MetaData(), autoload_with=conn) + @testing.combinations( + ("test_schema"), + ("[test_schema]"), + argnames="schema_value", + ) + @testing.variation( + "reflection_operation", ["has_table", "reflect_table", "get_columns"] + ) + def test_has_table_with_single_token_schema( + self, metadata, connection, schema_value, reflection_operation + ): + """test for #9133""" + tt = Table( + "test", metadata, Column("id", Integer), schema=schema_value + ) + tt.create(connection) + + if reflection_operation.has_table: + is_true(inspect(connection).has_table("test", schema=schema_value)) + elif reflection_operation.reflect_table: + m2 = MetaData() + Table("test", m2, autoload_with=connection, schema=schema_value) + elif reflection_operation.get_columns: + is_true( + inspect(connection).get_columns("test", schema=schema_value) + ) + else: + reflection_operation.fail() + def test_db_qualified_items(self, metadata, connection): Table("foo", metadata, Column("id", Integer, primary_key=True)) Table( From 
2c3372f6c03c7d15325cad08c9fd6d08c68fa2fd Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 24 Jan 2023 15:34:27 -0500 Subject: [PATCH 509/632] fix stringify for CreateSchema Fixed stringify for a the :class:`.CreateSchema` DDL construct, which would fail with an ``AttributeError`` when stringified without a dialect. Fixes: #7664 Change-Id: Ifc1769604bc5219c060f5112f7bdea0f780f1a1c (cherry picked from commit 90f4b5d84f248d95f3df38e74be92b23fd880e42) --- doc/build/changelog/unreleased_14/7664.rst | 7 ++++++ lib/sqlalchemy/sql/ddl.py | 2 ++ lib/sqlalchemy/testing/assertions.py | 2 ++ test/sql/test_metadata.py | 27 ++++++++++++++++++++++ 4 files changed, 38 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/7664.rst diff --git a/doc/build/changelog/unreleased_14/7664.rst b/doc/build/changelog/unreleased_14/7664.rst new file mode 100644 index 00000000000..2188ba3bc0f --- /dev/null +++ b/doc/build/changelog/unreleased_14/7664.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: bug, sql + :tickets: 7664 + + Fixed stringify for a the :class:`.CreateSchema` DDL construct, which would + fail with an ``AttributeError`` when stringified without a dialect. 
+ diff --git a/lib/sqlalchemy/sql/ddl.py b/lib/sqlalchemy/sql/ddl.py index bed64a56701..d47f1b24007 100644 --- a/lib/sqlalchemy/sql/ddl.py +++ b/lib/sqlalchemy/sql/ddl.py @@ -422,6 +422,8 @@ class CreateSchema(_CreateDropBase): __visit_name__ = "create_schema" + stringify_dialect = "default" + def __init__(self, name, quote=None, **kw): """Create a new :class:`.CreateSchema` construct.""" diff --git a/lib/sqlalchemy/testing/assertions.py b/lib/sqlalchemy/testing/assertions.py index 5c646790bf0..754f535f564 100644 --- a/lib/sqlalchemy/testing/assertions.py +++ b/lib/sqlalchemy/testing/assertions.py @@ -334,8 +334,10 @@ def startswith_(a, fragment, msg=None): def eq_ignore_whitespace(a, b, msg=None): a = re.sub(r"^\s+?|\n", "", a) a = re.sub(r" {2,}", " ", a) + a = re.sub(r"\t", "", a) b = re.sub(r"^\s+?|\n", "", b) b = re.sub(r" {2,}", " ", b) + b = re.sub(r"\t", "", b) assert a == b, msg or "%r != %r" % (a, b) diff --git a/test/sql/test_metadata.py b/test/sql/test_metadata.py index 50cf253379f..be410abd33a 100644 --- a/test/sql/test_metadata.py +++ b/test/sql/test_metadata.py @@ -48,6 +48,7 @@ from sqlalchemy.testing import ComparesTables from sqlalchemy.testing import emits_warning from sqlalchemy.testing import eq_ +from sqlalchemy.testing import eq_ignore_whitespace from sqlalchemy.testing import expect_raises_message from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ @@ -2594,6 +2595,32 @@ def test_default_schema_metadata_fk_alt_local(self): t2 = Table("t2", m, Column("x", Integer, ForeignKey("bar.t1.x"))) assert t2.c.x.references(t1.c.x) + @testing.combinations( + (schema.CreateSchema("sa_schema"), "CREATE SCHEMA sa_schema"), + # note we don't yet support lower-case table() or + # lower-case column() for this + # ( + # schema.CreateTable(table("t", column("q", Integer))), + # "CREATE TABLE t (q INTEGER)", + # ), + ( + schema.CreateTable(Table("t", MetaData(), Column("q", Integer))), + "CREATE TABLE t (q INTEGER)", + ), + ( + 
schema.CreateIndex( + Index( + "foo", + "x", + _table=Table("t", MetaData(), Column("x", Integer)), + ) + ), + "CREATE INDEX foo ON t (x)", + ), + ) + def test_stringify_schema_elements(self, element, expected): + eq_ignore_whitespace(str(element), expected) + def test_create_drop_schema(self): self.assert_compile( From 5df262b5eeb11a52a5c7367d53655ab4c20d0a57 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 24 Jan 2023 15:08:30 -0500 Subject: [PATCH 510/632] reflect Oracle ROWID Added :class:`_oracle.ROWID` to reflected types as this type may be used in a "CREATE TABLE" statement. For 1.4, the cherry-pick also backports a few more type reflection tests that should pass without issue. Fixes: #5047 Change-Id: I818dcf68ed81419d0fd5df5e2d51d6fa0f1be7fc (cherry picked from commit 1c6c5532d8bec105d9aaa7843a176b4e1ef0340a) --- doc/build/changelog/unreleased_14/5047.rst | 6 ++++ lib/sqlalchemy/dialects/oracle/base.py | 1 + setup.cfg | 2 +- test/dialect/oracle/test_reflection.py | 35 ++++++++++++++++++++++ 4 files changed, 43 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/5047.rst diff --git a/doc/build/changelog/unreleased_14/5047.rst b/doc/build/changelog/unreleased_14/5047.rst new file mode 100644 index 00000000000..4d08d771f73 --- /dev/null +++ b/doc/build/changelog/unreleased_14/5047.rst @@ -0,0 +1,6 @@ +.. change:: + :tags: bug, oracle + :tickets: 5047 + + Added :class:`_oracle.ROWID` to reflected types as this type may be used in + a "CREATE TABLE" statement. 
diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py index c1736a1f9fd..390ea5098c8 100644 --- a/lib/sqlalchemy/dialects/oracle/base.py +++ b/lib/sqlalchemy/dialects/oracle/base.py @@ -759,6 +759,7 @@ def get_dbapi_type(self, dbapi): "LONG": LONG, "BINARY_DOUBLE": BINARY_DOUBLE, "BINARY_FLOAT": BINARY_FLOAT, + "ROWID": ROWID, } diff --git a/setup.cfg b/setup.cfg index 45859cb6ccc..b455c8a097a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -177,6 +177,6 @@ mariadb_connector = mariadb+mariadbconnector://scott:tiger@127.0.0.1:3306/test mssql = mssql+pyodbc://scott:tiger^5HHH@mssql2017:1433/test?driver=ODBC+Driver+13+for+SQL+Server mssql_pymssql = mssql+pymssql://scott:tiger@ms_2008 docker_mssql = mssql+pymssql://scott:tiger^5HHH@127.0.0.1:1433/test -oracle = oracle://scott:tiger@127.0.0.1:1521 +oracle = oracle://scott:tiger@oracle18c oracle8 = oracle://scott:tiger@127.0.0.1:1521/?use_ansi=0 firebird = firebird://sysdba:mainkey@localhost//Users/classic/foo.fdb diff --git a/test/dialect/oracle/test_reflection.py b/test/dialect/oracle/test_reflection.py index b287e1024de..836edc0e927 100644 --- a/test/dialect/oracle/test_reflection.py +++ b/test/dialect/oracle/test_reflection.py @@ -15,14 +15,23 @@ from sqlalchemy import Numeric from sqlalchemy import PrimaryKeyConstraint from sqlalchemy import select +from sqlalchemy import String from sqlalchemy import testing from sqlalchemy import text from sqlalchemy import Unicode from sqlalchemy import UniqueConstraint +from sqlalchemy.dialects.oracle import NVARCHAR2 +from sqlalchemy.dialects.oracle import VARCHAR2 from sqlalchemy.dialects.oracle.base import BINARY_DOUBLE from sqlalchemy.dialects.oracle.base import BINARY_FLOAT from sqlalchemy.dialects.oracle.base import DOUBLE_PRECISION from sqlalchemy.dialects.oracle.base import NUMBER +from sqlalchemy.dialects.oracle.base import RAW +from sqlalchemy.dialects.oracle.base import ROWID +from sqlalchemy.sql.sqltypes import CHAR +from 
sqlalchemy.sql.sqltypes import NCHAR +from sqlalchemy.sql.sqltypes import NVARCHAR +from sqlalchemy.sql.sqltypes import VARCHAR from sqlalchemy.testing import assert_warns from sqlalchemy.testing import AssertsCompiledSQL from sqlalchemy.testing import eq_ @@ -817,6 +826,32 @@ def test_float_types( ] self._run_test(metadata, connection, specs, ["precision"]) + def test_string_types( + self, + metadata, + connection, + ): + specs = [ + (String(125), VARCHAR(125)), + (String(42).with_variant(VARCHAR2(42), "oracle"), VARCHAR(42)), + (Unicode(125), VARCHAR(125)), + (Unicode(42).with_variant(NVARCHAR2(42), "oracle"), NVARCHAR(42)), + (CHAR(125), CHAR(125)), + (NCHAR(42), NCHAR(42)), + ] + self._run_test(metadata, connection, specs, ["length"]) + + @testing.combinations(ROWID(), RAW(1), argnames="type_") + def test_misc_types(self, metadata, connection, type_): + t = Table("t1", metadata, Column("x", type_)) + + t.create(connection) + + eq_( + inspect(connection).get_columns("t1")[0]["type"]._type_affinity, + type_._type_affinity, + ) + class IdentityReflectionTest(fixtures.TablesTest): __only_on__ = "oracle" From 352dad47349cbf55db1de107a8f7a7f5f3fbefd6 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 25 Jan 2023 17:34:08 -0500 Subject: [PATCH 511/632] disable new tox 4.4.0 feature a new flag constrain_package_deps appears to interpret deps as constraints, and not requirements. turn it off. also remove the python setup command and use their default, try to stay compatible Change-Id: Ib400a7783c08c2c63ddb099944cd48b9631acd75 (cherry picked from commit aa2c2c8b4189b0fd71e7f8af0a81eda4204678f9) --- tox.ini | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index f007614b3b7..1c95f068e46 100644 --- a/tox.ini +++ b/tox.ini @@ -10,7 +10,10 @@ envlist = py # Jenkins etc. need to call "coverage erase" externally. 
cov_args=--cov=sqlalchemy --cov-report term --cov-append --cov-report xml --exclude-tag memory-intensive --exclude-tag timing-intensive -k "not aaa_profiling" -install_command=python -m pip install {env:TOX_PIP_OPTS:} {opts} {packages} +# new opt as of tox 4.4.0 was set to True causing it to dump the +# deps below into a constraints file, while requirements of the +# form ".[aiosqlite]" are not valid constraints, those are requirements +constrain_package_deps=false usedevelop= cov: True From 3fe348e3ca33fcb38f2942e6acdaa2222fdcdb83 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 25 Jan 2023 20:19:10 -0500 Subject: [PATCH 512/632] fix incorrect use of testing.future() this has been emitting a warning probably for a long time Change-Id: I44a6766b5e92d14ce6bbb5a90ab52648f877afc2 (cherry picked from commit 74aabce5190449ba2af65c180ebe67fe444193fb) --- lib/sqlalchemy/testing/exclusions.py | 5 ++--- test/orm/test_cascade.py | 2 +- test/orm/test_query.py | 2 +- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/lib/sqlalchemy/testing/exclusions.py b/lib/sqlalchemy/testing/exclusions.py index d4632799406..1bdbbbbfcce 100644 --- a/lib/sqlalchemy/testing/exclusions.py +++ b/lib/sqlalchemy/testing/exclusions.py @@ -429,9 +429,8 @@ def fails(reason=None): return fails_if(BooleanPredicate(True, reason or "expected to fail")) -@decorator -def future(fn, *arg): - return fails_if(LambdaPredicate(fn), "Future feature") +def future(): + return fails_if(BooleanPredicate(True, "Future feature")) def fails_on(db, reason=None): diff --git a/test/orm/test_cascade.py b/test/orm/test_cascade.py index dd23f84377e..c32eb00cdb7 100644 --- a/test/orm/test_cascade.py +++ b/test/orm/test_cascade.py @@ -2311,7 +2311,7 @@ def test_preserves_orphans_onelevel(self): eq_(sess.query(T2).all(), [T2()]) eq_(sess.query(T3).all(), [T3()]) - @testing.future + @testing.future() def test_preserves_orphans_onelevel_postremove(self): T2, T3, T1 = (self.classes.T2, self.classes.T3, 
self.classes.T1) diff --git a/test/orm/test_query.py b/test/orm/test_query.py index a249aa34bce..203b7e7e450 100644 --- a/test/orm/test_query.py +++ b/test/orm/test_query.py @@ -7348,7 +7348,7 @@ def test_one_or_none(self): .one_or_none, ) - @testing.future + @testing.future() def test_getslice(self): assert False From f39663404b3b4a5e3cfa6591b418435e3e54738f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 28 Jan 2023 09:37:50 -0500 Subject: [PATCH 513/632] Correct #7664 to include DropSchema Corrected the fix for :ticket:`7664`, released in version 2.0.0, to also include :class:`.DropSchema` which was inadvertently missed in this fix, allowing stringification without a dialect. The fixes for both constructs is backported to the 1.4 series as of 1.4.47. Fixes: #7664 Change-Id: I509b7500ee496ac1e444ea2096c2a02520167e6d (cherry picked from commit 70d1de6cff816d4627dd6b72223d9796e28aca1e) --- doc/build/changelog/unreleased_14/7664.rst | 5 +++-- lib/sqlalchemy/sql/ddl.py | 2 ++ test/sql/test_metadata.py | 20 ++++++++++++++++++++ 3 files changed, 25 insertions(+), 2 deletions(-) diff --git a/doc/build/changelog/unreleased_14/7664.rst b/doc/build/changelog/unreleased_14/7664.rst index 2188ba3bc0f..466eae8bc95 100644 --- a/doc/build/changelog/unreleased_14/7664.rst +++ b/doc/build/changelog/unreleased_14/7664.rst @@ -2,6 +2,7 @@ :tags: bug, sql :tickets: 7664 - Fixed stringify for a the :class:`.CreateSchema` DDL construct, which would - fail with an ``AttributeError`` when stringified without a dialect. + Fixed stringify for a the :class:`.CreateSchema` and :class:`.DropSchema` + DDL constructs, which would fail with an ``AttributeError`` when + stringified without a dialect. 
diff --git a/lib/sqlalchemy/sql/ddl.py b/lib/sqlalchemy/sql/ddl.py index d47f1b24007..275d38c99fe 100644 --- a/lib/sqlalchemy/sql/ddl.py +++ b/lib/sqlalchemy/sql/ddl.py @@ -440,6 +440,8 @@ class DropSchema(_CreateDropBase): __visit_name__ = "drop_schema" + stringify_dialect = "default" + def __init__(self, name, quote=None, cascade=False, **kw): """Create a new :class:`.DropSchema` construct.""" diff --git a/test/sql/test_metadata.py b/test/sql/test_metadata.py index be410abd33a..7d8542f8c8e 100644 --- a/test/sql/test_metadata.py +++ b/test/sql/test_metadata.py @@ -2597,6 +2597,7 @@ def test_default_schema_metadata_fk_alt_local(self): @testing.combinations( (schema.CreateSchema("sa_schema"), "CREATE SCHEMA sa_schema"), + (schema.DropSchema("sa_schema"), "DROP SCHEMA sa_schema"), # note we don't yet support lower-case table() or # lower-case column() for this # ( @@ -2607,6 +2608,10 @@ def test_default_schema_metadata_fk_alt_local(self): schema.CreateTable(Table("t", MetaData(), Column("q", Integer))), "CREATE TABLE t (q INTEGER)", ), + ( + schema.DropTable(Table("t", MetaData(), Column("q", Integer))), + "DROP TABLE t", + ), ( schema.CreateIndex( Index( @@ -2617,6 +2622,21 @@ def test_default_schema_metadata_fk_alt_local(self): ), "CREATE INDEX foo ON t (x)", ), + ( + schema.DropIndex( + Index( + "foo", + "x", + _table=Table("t", MetaData(), Column("x", Integer)), + ) + ), + "DROP INDEX foo", + ), + ( + schema.CreateSequence(Sequence("my_seq")), + "CREATE SEQUENCE my_seq START WITH 1", + ), + (schema.DropSequence(Sequence("my_seq")), "DROP SEQUENCE my_seq"), ) def test_stringify_schema_elements(self, element, expected): eq_ignore_whitespace(str(element), expected) From 629d156e94f2f70f42582d7e86ae89b8f242d708 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 30 Jan 2023 09:02:33 -0500 Subject: [PATCH 514/632] use scalars().one() for count migration so that the typed result is non-optional Fixes: #9184 Change-Id: I40e655010d08f795f3b835b9327ce50c6ec72135 (cherry 
picked from commit 956f152a29632afacacc4ea11dce1a54a7afb3ea) --- doc/build/changelog/migration_20.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/build/changelog/migration_20.rst b/doc/build/changelog/migration_20.rst index 7e567cbe60b..8f7b45c3e40 100644 --- a/doc/build/changelog/migration_20.rst +++ b/doc/build/changelog/migration_20.rst @@ -1449,13 +1449,13 @@ following the table, and may include additional notes not summarized here. - :: - session.scalar( + session.scalars( select(func.count()). select_from(User) - ) - session.scalar( + ).one() + session.scalars( select(func.count(User.id)) - ) + ).one() - :meth:`_orm.Session.scalar` From 38c2878fac181c8806899c0fe841816ea3f49e64 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 1 Feb 2023 11:18:55 -0500 Subject: [PATCH 515/632] fix late-eval example Fixes: #9209 Change-Id: Ic5bc87b25281e3cecc967843c402e96c7c2a3dbb (cherry picked from commit bfc33c5c72e3b24fd62d80b77441b38f40c746b0) --- doc/build/orm/basic_relationships.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/orm/basic_relationships.rst b/doc/build/orm/basic_relationships.rst index dae04081eaf..f50e5045d6a 100644 --- a/doc/build/orm/basic_relationships.rst +++ b/doc/build/orm/basic_relationships.rst @@ -653,7 +653,7 @@ A Python functional approach might look like the following:: # ... children = relationship( - _resolve_child_model(), + _resolve_child_model, order_by=lambda: desc(_resolve_child_model().email_address), primaryjoin=lambda: Parent.id == _resolve_child_model().parent_id, ) From faa20b16590c1a0b10817d3828cf479a906aebda Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 9 Feb 2023 12:05:47 -0500 Subject: [PATCH 516/632] pin sphinx-copybutton and change config sphinx-copybutton introduced a new feature in 0.5.1 which includes a default configuration that breaks the regexp prompt matching scheme. 
set copybutton_exclude to not include ".gp" as that's the class where we exactly look for the prompts we are matching. While we're there, use this new feature to exclude our sql styles, even though this is not strictly necessary in our case. pin sphinx-copybutton at 0.5.1 to avoid future problems. Change-Id: I8eaeab13995c032b9ee3afd1f08dae5929009d45 References: https://github.com/executablebooks/sphinx-copybutton/issues/185 (cherry picked from commit 13d3b2c291f49525bc38d082d1c2abe9e03bd3fe) --- doc/build/conf.py | 6 ++++++ doc/build/requirements.txt | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/doc/build/conf.py b/doc/build/conf.py index 7c51ff06133..d07758fc055 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -51,6 +51,12 @@ ) copybutton_prompt_is_regexp = True +# workaround +# https://sphinx-copybutton-exclude-issue.readthedocs.io/en/v0.5.1-go/ +# https://github.com/executablebooks/sphinx-copybutton/issues/185 +# while we're at it, add our SQL css classes to also not be copied +copybutton_exclude = ".linenos .show_sql .show_sql_print .popup_sql" + nitpicky = False # The suffix of source filenames. diff --git a/doc/build/requirements.txt b/doc/build/requirements.txt index c5871d21241..6588bf3665d 100644 --- a/doc/build/requirements.txt +++ b/doc/build/requirements.txt @@ -1,4 +1,4 @@ git+https://github.com/sqlalchemyorg/changelog.git#egg=changelog git+https://github.com/sqlalchemyorg/sphinx-paramlinks.git#egg=sphinx-paramlinks git+https://github.com/sqlalchemyorg/zzzeeksphinx.git#egg=zzzeeksphinx -sphinx-copybutton \ No newline at end of file +sphinx-copybutton==0.5.1 From eb8336e12500c2691b9e1f35d4527200b2085e27 Mon Sep 17 00:00:00 2001 From: "Maxwell D. 
Dorliea" <46686591+Maxcarrassco@users.noreply.github.com> Date: Sun, 19 Feb 2023 16:46:42 +0000 Subject: [PATCH 517/632] Fix grammatical error in intro.rst (#9316) (cherry picked from commit 1fc1aafbc4bd5bc85289699c0de1d82cb6cbc2d3) --- doc/build/intro.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/intro.rst b/doc/build/intro.rst index 2d8ac407dea..02a546a86e1 100644 --- a/doc/build/intro.rst +++ b/doc/build/intro.rst @@ -22,7 +22,7 @@ Core contains the breadth of SQLAlchemy's SQL and database integration and description services, the most prominent part of this being the **SQL Expression Language**. -The SQL Expression Language is a toolkit all its own, independent of the ORM +The SQL Expression Language is a toolkit on its own, independent of the ORM package, which provides a system of constructing SQL expressions represented by composable objects, which can then be "executed" against a target database within the scope of a specific transaction, returning a result set. From c8a3a6dfbfc5f2fa5eb4eeb17e317a5e95bbcfed Mon Sep 17 00:00:00 2001 From: Andreas Motl Date: Sun, 19 Feb 2023 08:47:02 -0800 Subject: [PATCH 518/632] Update dialect documentation about CrateDB (#9322) The `crate-0.30.0` package offers compatibility with SQLAlchemy 2.0. 
(cherry picked from commit 46b81981340a703ae4fbe47fcdaaec85a590d5e9) --- doc/build/dialects/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/dialects/index.rst b/doc/build/dialects/index.rst index 14ef2ed8f66..92675827b9e 100644 --- a/doc/build/dialects/index.rst +++ b/doc/build/dialects/index.rst @@ -95,7 +95,7 @@ Currently maintained external dialect projects for SQLAlchemy include: +------------------------------------------------+---------------------------------------+ | CockroachDB | sqlalchemy-cockroachdb_ | +------------------------------------------------+---------------------------------------+ -| CrateDB [1]_ | crate-python_ | +| CrateDB | crate-python_ | +------------------------------------------------+---------------------------------------+ | EXASolution | sqlalchemy_exasol_ | +------------------------------------------------+---------------------------------------+ From 81dd922cf79f4df0eafceb2e5092915d79d72853 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 15 Feb 2023 18:28:12 -0500 Subject: [PATCH 519/632] prevent float tests from running on asyncmy asyncmy 0.2.7 has had a loss in float precision for even very low numbers of significant digits. 
Change-Id: Iec6d2650943eeaa8e854f21990f6565d73331f8c References: https://github.com/long2ice/asyncmy/issues/56 (cherry picked from commit 8855656626202e541bd2c95bc023e820a022322f) --- lib/sqlalchemy/testing/requirements.py | 14 ++++++++++++++ lib/sqlalchemy/testing/suite/test_types.py | 6 ++---- test/dialect/mysql/test_types.py | 2 ++ test/requirements.py | 4 ++++ 4 files changed, 22 insertions(+), 4 deletions(-) diff --git a/lib/sqlalchemy/testing/requirements.py b/lib/sqlalchemy/testing/requirements.py index 0e8eec52d69..0b2e059d0a1 100644 --- a/lib/sqlalchemy/testing/requirements.py +++ b/lib/sqlalchemy/testing/requirements.py @@ -1043,6 +1043,20 @@ def precision_generic_float_type(self): """ return exclusions.open() + @property + def literal_float_coercion(self): + """target backend will return the exact float value 15.7563 + with only four significant digits from this statement: + + SELECT :param + + where :param is the Python float 15.7563 + + i.e. it does not return 15.75629997253418 + + """ + return exclusions.open() + @property def floats_to_four_decimals(self): """target backend can return a floating-point number with four diff --git a/lib/sqlalchemy/testing/suite/test_types.py b/lib/sqlalchemy/testing/suite/test_types.py index b96350ed077..6dc50895752 100644 --- a/lib/sqlalchemy/testing/suite/test_types.py +++ b/lib/sqlalchemy/testing/suite/test_types.py @@ -692,6 +692,7 @@ def test_float_as_float(self, do_numeric_test): filter_=lambda n: n is not None and round(n, 5) or None, ) + @testing.requires.literal_float_coercion def test_float_coerce_round_trip(self, connection): expr = 15.7563 @@ -941,10 +942,7 @@ def _index_fixtures(include_comparison): ("integer", None), ("float", 28.5), ("float", None), - ( - "float", - 1234567.89, - ), + ("float", 1234567.89, testing.requires.literal_float_coercion), ("numeric", 1234567.89), # this one "works" because the float value you see here is # lost immediately to floating point stuff diff --git 
a/test/dialect/mysql/test_types.py b/test/dialect/mysql/test_types.py index 017fad3cff6..358b814b92f 100644 --- a/test/dialect/mysql/test_types.py +++ b/test/dialect/mysql/test_types.py @@ -474,6 +474,8 @@ class TypeRoundTripTest(fixtures.TestBase, AssertsExecutionResults): # fixed in mysql-connector as of 2.0.1, # see https://bugs.mysql.com/bug.php?id=73266 + + @testing.requires.literal_float_coercion def test_precision_float_roundtrip(self, metadata, connection): t = Table( "t", diff --git a/test/requirements.py b/test/requirements.py index fa9ba88f58f..47f5c49eb34 100644 --- a/test/requirements.py +++ b/test/requirements.py @@ -1360,6 +1360,10 @@ def precision_numerics_retains_significant_digits(self): ] ) + @property + def literal_float_coercion(self): + return skip_if("+asyncmy") + @property def infinity_floats(self): return fails_on_everything_except( From 4878ede0d921bb03758b0c79293bf23b6e248c49 Mon Sep 17 00:00:00 2001 From: Grey Li Date: Sun, 26 Feb 2023 05:56:37 -0500 Subject: [PATCH 520/632] Add separate version notes for scalars Add separate 1.4.24 and 1.4.26 version notes for the .scalars method; this covers Session, scoped_session, AsyncSession, async_scoped_session as the "scoped" versions did not have the method added until 1.4.26 as part of :ticket:`7103`. Also indicate scoped_session as ``sqlalchemy.orm.scoped_session`` in docs rather than ``sqlalchemy.orm.scoping.scoped_session``. This is also happening in I77da54891860095edcb1f0625ead99fee89bd76f separately, as both changesets refer to scoped_session without using ".scoping". 
References: #7103 Closes: #9371 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/9371 Pull-request-sha: 61132230cc6e897ab61beff25d98b19a4c0aefd0 Change-Id: I84c8b1aad752db124cfee6bc8516f6eed7ba2faf (cherry picked from commit ff97b9ab5e59283f684edac9a075702c43e8a4c1) --- doc/build/orm/contextual.rst | 2 +- lib/sqlalchemy/ext/asyncio/session.py | 5 ++++- lib/sqlalchemy/orm/session.py | 4 +++- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/doc/build/orm/contextual.rst b/doc/build/orm/contextual.rst index 9fadf6c732b..adafc4bab61 100644 --- a/doc/build/orm/contextual.rst +++ b/doc/build/orm/contextual.rst @@ -269,7 +269,7 @@ otherwise self-managed. Contextual Session API ---------------------- -.. autoclass:: sqlalchemy.orm.scoping.scoped_session +.. autoclass:: sqlalchemy.orm.scoped_session :members: :inherited-members: diff --git a/lib/sqlalchemy/ext/asyncio/session.py b/lib/sqlalchemy/ext/asyncio/session.py index 59beb237059..5238b8d1eb5 100644 --- a/lib/sqlalchemy/ext/asyncio/session.py +++ b/lib/sqlalchemy/ext/asyncio/session.py @@ -258,7 +258,10 @@ async def scalars( :return: a :class:`_result.ScalarResult` object - .. versionadded:: 1.4.24 + .. versionadded:: 1.4.24 Added :meth:`_asyncio.AsyncSession.scalars` + + .. versionadded:: 1.4.26 Added + :meth:`_asyncio.async_scoped_session.scalars` .. seealso:: diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index 2b90269b2ad..753d1ec5f93 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -1766,7 +1766,9 @@ def scalars( :return: a :class:`_result.ScalarResult` object - .. versionadded:: 1.4.24 + .. versionadded:: 1.4.24 Added :meth:`_orm.Session.scalars` + + .. 
versionadded:: 1.4.26 Added :meth:`_orm.scoped_session.scalars` """ From 807650daa95d8d6ec77b17e2cffcdf47884e1e90 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 8 Mar 2023 10:10:14 -0500 Subject: [PATCH 521/632] additional consistency for ORM/Core in tutorial * Make sure we have blue borders for all sections * rewrite "blue border" text, refer to textual means of determining subject matter for a section; "blue borders" are not a primary source of information * Add some more intro text that was missing Change-Id: I4d599e13d23bad8bb3c199a11afb53e3e9100c59 References: #9450 (cherry picked from commit 8d5986fafd8360ddfe3992bd56602d5a52a23392) --- doc/build/tutorial/data.rst | 2 ++ doc/build/tutorial/data_insert.rst | 4 +-- doc/build/tutorial/engine.rst | 8 +++++ doc/build/tutorial/index.rst | 35 +++++++++++++++++----- doc/build/tutorial/orm_related_objects.rst | 5 ++-- 5 files changed, 41 insertions(+), 13 deletions(-) diff --git a/doc/build/tutorial/data.rst b/doc/build/tutorial/data.rst index 1d5dde7b847..3242710a928 100644 --- a/doc/build/tutorial/data.rst +++ b/doc/build/tutorial/data.rst @@ -5,6 +5,8 @@ .. include:: tutorial_nav_include.rst +.. rst-class:: core-header, orm-addin + .. _tutorial_working_with_data: Working with Data diff --git a/doc/build/tutorial/data_insert.rst b/doc/build/tutorial/data_insert.rst index 0d745cb319c..765b6890b6e 100644 --- a/doc/build/tutorial/data_insert.rst +++ b/doc/build/tutorial/data_insert.rst @@ -5,9 +5,7 @@ .. include:: tutorial_nav_include.rst - -.. rst-class:: core-header - +.. rst-class:: core-header, orm-addin .. _tutorial_core_insert: diff --git a/doc/build/tutorial/engine.rst b/doc/build/tutorial/engine.rst index fc8973c4659..4e53ae6bf9c 100644 --- a/doc/build/tutorial/engine.rst +++ b/doc/build/tutorial/engine.rst @@ -3,11 +3,19 @@ .. include:: tutorial_nav_include.rst +.. rst-class:: core-header, orm-addin + .. 
_tutorial_engine: Establishing Connectivity - the Engine ========================================== +.. container:: orm-header + + **Welcome ORM and Core readers alike!** + + Every SQLAlchemy application that connects to a database needs to use + an :class:`_engine.Engine`. This short section is for everyone. The start of any SQLAlchemy application is an object called the :class:`_future.Engine`. This object acts as a central source of connections diff --git a/doc/build/tutorial/index.rst b/doc/build/tutorial/index.rst index cb6c2feae3a..2440a33ed08 100644 --- a/doc/build/tutorial/index.rst +++ b/doc/build/tutorial/index.rst @@ -42,9 +42,14 @@ These APIs are known as **Core** and **ORM**. to a database, interacting with database queries and results, and programmatic construction of SQL statements. - Sections that have a **dark blue border on the right** will discuss - concepts that are **primarily Core-only**; when using the ORM, these - concepts are still in play but are less often explicit in user code. + Sections that are **primarily Core-only** will not refer to the ORM. + SQLAlchemy constructs used in these sections will be imported from the + ``sqlalchemy`` namespace. As an additional indicator of subject + classification, they will also include a **dark blue border on the right**. + When using the ORM, these concepts are still in play but are less often + explicit in user code. ORM users should read these sections, but not expect + to be using these APIs directly for ORM-centric code. + .. container:: orm-header @@ -56,14 +61,28 @@ These APIs are known as **Core** and **ORM**. SQL Expression Language to allow SQL queries to be composed and invoked in terms of user-defined objects. - Sections that have a **light blue border on the left** will discuss - concepts that are **primarily ORM-only**. Core-only users - can skip these. 
+ Sections that are **primarily ORM-only** should be **titled to + include the phrase "ORM"**, so that it's clear this is an ORM related topic. + SQLAlchemy constructs used in these sections will be imported from the + ``sqlalchemy.orm`` namespace. Finally, as an additional indicator of + subject classification, they will also include a **light blue border on the + left**. Core-only users can skip these. .. container:: core-header, orm-dependency - A section that has **both light and dark borders on both sides** will - discuss a **Core concept that is also used explicitly with the ORM**. + **Most** sections in this tutorial discuss **Core concepts that + are also used explicitly with the ORM**. SQLAlchemy 2.0 in particular + features a much greater level of integration of Core API use within the + ORM. + + For each of these sections, there will be **introductory text** discussing the + degree to which ORM users should expect to be using these programming + patterns. SQLAlchemy constructs in these sections will be imported from the + ``sqlalchemy`` namespace with some potential use of ``sqlalchemy.orm`` + constructs at the same time. As an additional indicator of subject + classification, these sections will also include **both a thinner light + border on the left, and a thicker dark border on the right**. Core and ORM + users should familiarize with concepts in these sections equally. Tutorial Overview diff --git a/doc/build/tutorial/orm_related_objects.rst b/doc/build/tutorial/orm_related_objects.rst index 02ff2c17221..61ce5a1bd69 100644 --- a/doc/build/tutorial/orm_related_objects.rst +++ b/doc/build/tutorial/orm_related_objects.rst @@ -5,11 +5,12 @@ .. include:: tutorial_nav_include.rst +.. rst-class:: orm-header .. 
_tutorial_orm_related_objects: -Working with Related Objects -============================ +Working with ORM Related Objects +================================ In this section, we will cover one more essential ORM concept, which is how the ORM interacts with mapped classes that refer to other objects. In the From 73515434c88c73cb81c738ba2ec84953607b9d97 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 15 Mar 2023 10:09:57 -0400 Subject: [PATCH 522/632] remove "listeners" docstring this should have been removed with #4638. Fixes: #9492 Change-Id: If82dba7e63382e921aceb0c01d88f0977b7f5e8d (cherry picked from commit 7fd3b4747d2935effd9e0c2bbe72f080df4ffc50) --- lib/sqlalchemy/engine/create.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lib/sqlalchemy/engine/create.py b/lib/sqlalchemy/engine/create.py index 2e1219b442c..16c75fc217d 100644 --- a/lib/sqlalchemy/engine/create.py +++ b/lib/sqlalchemy/engine/create.py @@ -323,10 +323,6 @@ def create_engine(url, **kwargs): :paramref:`_sa.create_engine.max_identifier_length` - :param listeners: A list of one or more - :class:`~sqlalchemy.interfaces.PoolListener` objects which will - receive connection pool events. - :param logging_name: String identifier which will be used within the "name" field of logging records generated within the "sqlalchemy.engine" logger. Defaults to a hexstring of the From 953f3d9ccad72396f3af81188189795d805ee913 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 18 Mar 2023 11:43:47 -0400 Subject: [PATCH 523/632] implement content hashing for custom_op, not identity Fixed critical SQL caching issue where use of the :meth:`_sql.Operators.op` custom operator function would not produce an appropriate cache key, leading to reduce the effectiveness of the SQL cache. 
Fixes: #9506 Change-Id: I3eab1ddb5e09a811ad717161a59df0884cdf70ed (cherry picked from commit 0a0c7c73729152b7606509b6e750371106dfdd46) --- doc/build/changelog/unreleased_14/9506.rst | 8 ++++++++ lib/sqlalchemy/sql/operators.py | 18 ++++++++++++++++-- lib/sqlalchemy/sql/traversals.py | 2 +- test/sql/test_compare.py | 13 +++++++++++++ 4 files changed, 38 insertions(+), 3 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/9506.rst diff --git a/doc/build/changelog/unreleased_14/9506.rst b/doc/build/changelog/unreleased_14/9506.rst new file mode 100644 index 00000000000..2533a986b1c --- /dev/null +++ b/doc/build/changelog/unreleased_14/9506.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, sql + :tickets: 9506 + + Fixed critical SQL caching issue where use of the + :meth:`_sql.Operators.op` custom operator function would not produce an appropriate + cache key, leading to reduce the effectiveness of the SQL cache. + diff --git a/lib/sqlalchemy/sql/operators.py b/lib/sqlalchemy/sql/operators.py index 8fd851d1561..2ce1add26f8 100644 --- a/lib/sqlalchemy/sql/operators.py +++ b/lib/sqlalchemy/sql/operators.py @@ -293,10 +293,24 @@ def __init__( ) def __eq__(self, other): - return isinstance(other, custom_op) and other.opstring == self.opstring + return ( + isinstance(other, custom_op) + and other._hash_key() == self._hash_key() + ) def __hash__(self): - return id(self) + return hash(self._hash_key()) + + def _hash_key(self): + return ( + self.__class__, + self.opstring, + self.precedence, + self.is_comparison, + self.natural_self_precedent, + self.eager_grouping, + self.return_type._static_cache_key if self.return_type else None, + ) def __call__(self, left, right, **kw): return left.operate(self, right, **kw) diff --git a/lib/sqlalchemy/sql/traversals.py b/lib/sqlalchemy/sql/traversals.py index 21aa17a0a64..de97b9de94c 100644 --- a/lib/sqlalchemy/sql/traversals.py +++ b/lib/sqlalchemy/sql/traversals.py @@ -1300,7 +1300,7 @@ def visit_boolean( def visit_operator( 
self, attrname, left_parent, left, right_parent, right, **kw ): - return left is right + return left == right def visit_type( self, attrname, left_parent, left, right_parent, right, **kw diff --git a/test/sql/test_compare.py b/test/sql/test_compare.py index 6cee271c9c1..c8e1efbf1b7 100644 --- a/test/sql/test_compare.py +++ b/test/sql/test_compare.py @@ -13,6 +13,7 @@ from sqlalchemy import extract from sqlalchemy import Float from sqlalchemy import Integer +from sqlalchemy import literal from sqlalchemy import literal_column from sqlalchemy import MetaData from sqlalchemy import or_ @@ -204,11 +205,23 @@ class CoreFixtures(object): bindparam("bar", type_=String) ), ), + lambda: ( + literal(1).op("+")(literal(1)), + literal(1).op("-")(literal(1)), + column("q").op("-")(literal(1)), + UnaryExpression(table_a.c.b, modifier=operators.neg), + UnaryExpression(table_a.c.b, modifier=operators.desc_op), + UnaryExpression(table_a.c.b, modifier=operators.custom_op("!")), + UnaryExpression(table_a.c.b, modifier=operators.custom_op("~")), + ), lambda: ( column("q") == column("x"), column("q") == column("y"), column("z") == column("x"), column("z") + column("x"), + column("z").op("foo")(column("x")), + column("z").op("foo")(literal(1)), + column("z").op("bar")(column("x")), column("z") - column("x"), column("x") - column("z"), column("z") > column("x"), From cdad8ab375ab5d04c52f7be7ce1c85f588d8355e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 18 Mar 2023 13:04:58 -0400 Subject: [PATCH 524/632] - 1.4.47 --- doc/build/changelog/changelog_14.rst | 104 +++++++++++++++++- doc/build/changelog/unreleased_14/5047.rst | 6 - doc/build/changelog/unreleased_14/7664.rst | 8 -- doc/build/changelog/unreleased_14/9047.rst | 7 -- doc/build/changelog/unreleased_14/9048.rst | 9 -- doc/build/changelog/unreleased_14/9075.rst | 18 --- doc/build/changelog/unreleased_14/9133.rst | 16 --- doc/build/changelog/unreleased_14/9506.rst | 8 -- .../changelog/unreleased_14/mypy_fix.rst | 22 ---- 
doc/build/conf.py | 4 +- 10 files changed, 105 insertions(+), 97 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/5047.rst delete mode 100644 doc/build/changelog/unreleased_14/7664.rst delete mode 100644 doc/build/changelog/unreleased_14/9047.rst delete mode 100644 doc/build/changelog/unreleased_14/9048.rst delete mode 100644 doc/build/changelog/unreleased_14/9075.rst delete mode 100644 doc/build/changelog/unreleased_14/9133.rst delete mode 100644 doc/build/changelog/unreleased_14/9506.rst delete mode 100644 doc/build/changelog/unreleased_14/mypy_fix.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 3f0027fa146..740b716c7b8 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,109 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.47 - :include_notes_from: unreleased_14 + :released: March 18, 2023 + + .. change:: + :tags: bug, sql + :tickets: 9075 + :versions: 2.0.0rc3 + + Fixed bug / regression where using :func:`.bindparam()` with the same name + as a column in the :meth:`.Update.values` method of :class:`.Update`, as + well as the :meth:`.Insert.values` method of :class:`.Insert` in 2.0 only, + would in some cases silently fail to honor the SQL expression in which the + parameter were presented, replacing the expression with a new parameter of + the same name and discarding any other elements of the SQL expression, such + as SQL functions, etc. The specific case would be statements that were + constructed against ORM entities rather than plain :class:`.Table` + instances, but would occur if the statement were invoked with a + :class:`.Session` or a :class:`.Connection`. + + :class:`.Update` part of the issue was present in both 2.0 and 1.4 and is + backported to 1.4. + + .. 
change:: + :tags: bug, oracle + :tickets: 5047 + + Added :class:`_oracle.ROWID` to reflected types as this type may be used in + a "CREATE TABLE" statement. + + .. change:: + :tags: bug, sql + :tickets: 7664 + + Fixed stringify for a the :class:`.CreateSchema` and :class:`.DropSchema` + DDL constructs, which would fail with an ``AttributeError`` when + stringified without a dialect. + + + .. change:: + :tags: usecase, mysql + :tickets: 9047 + :versions: 2.0.0 + + Added support to MySQL index reflection to correctly reflect the + ``mysql_length`` dictionary, which previously was being ignored. + + .. change:: + :tags: bug, postgresql + :tickets: 9048 + :versions: 2.0.0 + + Added support to the asyncpg dialect to return the ``cursor.rowcount`` + value for SELECT statements when available. While this is not a typical use + for ``cursor.rowcount``, the other PostgreSQL dialects generally provide + this value. Pull request courtesy Michael Gorven. + + .. change:: + :tags: bug, mssql + :tickets: 9133 + + Fixed bug where a schema name given with brackets, but no dots inside the + name, for parameters such as :paramref:`_schema.Table.schema` would not be + interpreted within the context of the SQL Server dialect's documented + behavior of interpreting explicit brackets as token delimiters, first added + in 1.2 for #2626, when referring to the schema name in reflection + operations. The original assumption for #2626's behavior was that the + special interpretation of brackets was only significant if dots were + present, however in practice, the brackets are not included as part of the + identifier name for all SQL rendering operations since these are not valid + characters within regular or delimited identifiers. Pull request courtesy + Shan. + + + .. change:: + :tags: bug, mypy + :versions: 2.0.0rc3 + + Adjustments made to the mypy plugin to accommodate for some potential + changes being made for issue #236 sqlalchemy2-stubs when using SQLAlchemy + 1.4. 
These changes are being kept in sync within SQLAlchemy 2.0. + The changes are also backwards compatible with older versions of + sqlalchemy2-stubs. + + + .. change:: + :tags: bug, mypy + :tickets: 9102 + :versions: 2.0.0rc3 + + Fixed crash in mypy plugin which could occur on both 1.4 and 2.0 versions + if a decorator for the :func:`_orm.registry.mapped` decorator were used + that was referenced in an expression with more than two components (e.g. + ``@Backend.mapper_registry.mapped``). This scenario is now ignored; when + using the plugin, the decorator expression needs to be two components (i.e. + ``@reg.mapped``). + + .. change:: + :tags: bug, sql + :tickets: 9506 + + Fixed critical SQL caching issue where use of the + :meth:`_sql.Operators.op` custom operator function would not produce an appropriate + cache key, leading to reduce the effectiveness of the SQL cache. + .. changelog:: :version: 1.4.46 diff --git a/doc/build/changelog/unreleased_14/5047.rst b/doc/build/changelog/unreleased_14/5047.rst deleted file mode 100644 index 4d08d771f73..00000000000 --- a/doc/build/changelog/unreleased_14/5047.rst +++ /dev/null @@ -1,6 +0,0 @@ -.. change:: - :tags: bug, oracle - :tickets: 5047 - - Added :class:`_oracle.ROWID` to reflected types as this type may be used in - a "CREATE TABLE" statement. diff --git a/doc/build/changelog/unreleased_14/7664.rst b/doc/build/changelog/unreleased_14/7664.rst deleted file mode 100644 index 466eae8bc95..00000000000 --- a/doc/build/changelog/unreleased_14/7664.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 7664 - - Fixed stringify for a the :class:`.CreateSchema` and :class:`.DropSchema` - DDL constructs, which would fail with an ``AttributeError`` when - stringified without a dialect. 
- diff --git a/doc/build/changelog/unreleased_14/9047.rst b/doc/build/changelog/unreleased_14/9047.rst deleted file mode 100644 index 74110890e8b..00000000000 --- a/doc/build/changelog/unreleased_14/9047.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: usecase, mysql - :tickets: 9047 - :versions: 2.0.0 - - Added support to MySQL index reflection to correctly reflect the - ``mysql_length`` dictionary, which previously was being ignored. diff --git a/doc/build/changelog/unreleased_14/9048.rst b/doc/build/changelog/unreleased_14/9048.rst deleted file mode 100644 index cf0c818349e..00000000000 --- a/doc/build/changelog/unreleased_14/9048.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, postgresql - :tickets: 9048 - :versions: 2.0.0 - - Added support to the asyncpg dialect to return the ``cursor.rowcount`` - value for SELECT statements when available. While this is not a typical use - for ``cursor.rowcount``, the other PostgreSQL dialects generally provide - this value. Pull request courtesy Michael Gorven. diff --git a/doc/build/changelog/unreleased_14/9075.rst b/doc/build/changelog/unreleased_14/9075.rst deleted file mode 100644 index 0d96be77088..00000000000 --- a/doc/build/changelog/unreleased_14/9075.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 9075 - :versions: 2.0.0rc3 - - Fixed bug / regression where using :func:`.bindparam()` with the same name - as a column in the :meth:`.Update.values` method of :class:`.Update`, as - well as the :meth:`.Insert.values` method of :class:`.Insert` in 2.0 only, - would in some cases silently fail to honor the SQL expression in which the - parameter were presented, replacing the expression with a new parameter of - the same name and discarding any other elements of the SQL expression, such - as SQL functions, etc. 
The specific case would be statements that were - constructed against ORM entities rather than plain :class:`.Table` - instances, but would occur if the statement were invoked with a - :class:`.Session` or a :class:`.Connection`. - - :class:`.Update` part of the issue was present in both 2.0 and 1.4 and is - backported to 1.4. diff --git a/doc/build/changelog/unreleased_14/9133.rst b/doc/build/changelog/unreleased_14/9133.rst deleted file mode 100644 index 29e05f5fe9d..00000000000 --- a/doc/build/changelog/unreleased_14/9133.rst +++ /dev/null @@ -1,16 +0,0 @@ -.. change:: - :tags: bug, mssql - :tickets: 9133 - - Fixed bug where a schema name given with brackets, but no dots inside the - name, for parameters such as :paramref:`_schema.Table.schema` would not be - interpreted within the context of the SQL Server dialect's documented - behavior of interpreting explicit brackets as token delimiters, first added - in 1.2 for #2626, when referring to the schema name in reflection - operations. The original assumption for #2626's behavior was that the - special interpretation of brackets was only significant if dots were - present, however in practice, the brackets are not included as part of the - identifier name for all SQL rendering operations since these are not valid - characters within regular or delimited identifiers. Pull request courtesy - Shan. - diff --git a/doc/build/changelog/unreleased_14/9506.rst b/doc/build/changelog/unreleased_14/9506.rst deleted file mode 100644 index 2533a986b1c..00000000000 --- a/doc/build/changelog/unreleased_14/9506.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 9506 - - Fixed critical SQL caching issue where use of the - :meth:`_sql.Operators.op` custom operator function would not produce an appropriate - cache key, leading to reduce the effectiveness of the SQL cache. 
- diff --git a/doc/build/changelog/unreleased_14/mypy_fix.rst b/doc/build/changelog/unreleased_14/mypy_fix.rst deleted file mode 100644 index d383c772876..00000000000 --- a/doc/build/changelog/unreleased_14/mypy_fix.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. change:: - :tags: bug, mypy - :versions: 2.0.0rc3 - - Adjustments made to the mypy plugin to accommodate for some potential - changes being made for issue #236 sqlalchemy2-stubs when using SQLAlchemy - 1.4. These changes are being kept in sync within SQLAlchemy 2.0. - The changes are also backwards compatible with older versions of - sqlalchemy2-stubs. - - -.. change:: - :tags: bug, mypy - :tickets: 9102 - :versions: 2.0.0rc3 - - Fixed crash in mypy plugin which could occur on both 1.4 and 2.0 versions - if a decorator for the :func:`_orm.registry.mapped` decorator were used - that was referenced in an expression with more than two components (e.g. - ``@Backend.mapper_registry.mapped``). This scenario is now ignored; when - using the plugin, the decorator expression needs to be two components (i.e. - ``@reg.mapped``). diff --git a/doc/build/conf.py b/doc/build/conf.py index d07758fc055..9da54538dc4 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -219,9 +219,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. 
-release = "1.4.46" +release = "1.4.47" -release_date = "January 3, 2023" +release_date = "March 18, 2023" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From 0fff34d160b78a1811b3e86686613ac541712bc9 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 18 Mar 2023 13:09:08 -0400 Subject: [PATCH 525/632] Version 1.4.48 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 740b716c7b8..49ad3030488 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.48 + :include_notes_from: unreleased_14 + .. changelog:: :version: 1.4.47 :released: March 18, 2023 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index 3b356f9598f..54be07f29d6 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.47" +__version__ = "1.4.48" def __go(lcls): From 1a7f56a1bd2c583577159343327b04a49e8f57fb Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 21 Mar 2023 13:40:36 -0400 Subject: [PATCH 526/632] add tip that reflection is not necessary for an existing database a new user spent many days misled by this paragraph thinking they were required to use reflection for an existing database. 
Change-Id: I4c6757b931481db7a8d4202334382143e1491935 (cherry picked from commit 42b3b80fae8d7b808125efc1dd9a900231c2ed21) --- doc/build/tutorial/metadata.rst | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/doc/build/tutorial/metadata.rst b/doc/build/tutorial/metadata.rst index 215d9fd8b89..df3b336f454 100644 --- a/doc/build/tutorial/metadata.rst +++ b/doc/build/tutorial/metadata.rst @@ -498,8 +498,19 @@ another operation that was mentioned at the beginning of the section, that of **table reflection**. Table reflection refers to the process of generating :class:`_schema.Table` and related objects by reading the current state of a database. Whereas in the previous sections we've been declaring -:class:`_schema.Table` objects in Python and then emitting DDL to the database, -the reflection process does it in reverse. +:class:`_schema.Table` objects in Python, where we then have the option +to emit DDL to the database to generate such a schema, the reflection process +does these two steps in reverse, starting from an existing database +and generating in-Python data structures to represent the schemas within +that database. + +.. tip:: There is no requirement that reflection must be used in order to + use SQLAlchemy with a pre-existing database. It is entirely typical that + the SQLAlchemy application declares all metadata explicitly in Python, + such that its structure corresponds to that the existing database. + The metadata structure also need not include tables, columns, or other + constraints and constructs in the pre-existing database that are not needed + for the local application to function. 
As an example of reflection, we will create a new :class:`_schema.Table` object which represents the ``some_table`` object we created manually in From 1aef8e75a69319469d3b447422b8cdee2a1cf894 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 2 Apr 2023 14:24:32 -0400 Subject: [PATCH 527/632] consider aliased mappers in cycles also Fixed endless loop which could occur when using "relationship to aliased class" feature and also indicating a recursive eager loader such as ``lazy="selectinload"`` in the loader, in combination with another eager loader on the opposite side. The check for cycles has been fixed to include aliased class relationships. Fixes: #9590 Change-Id: I8d340882f040ff9289c209bedd8fbdfd7186f944 (cherry picked from commit e79ab08165e01dc7af50fcffadb31468ace51b6c) --- doc/build/changelog/unreleased_14/9590.rst | 10 ++++ lib/sqlalchemy/orm/path_registry.py | 2 +- test/orm/test_ac_relationships.py | 63 ++++++++++++++++++++++ 3 files changed, 74 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/9590.rst diff --git a/doc/build/changelog/unreleased_14/9590.rst b/doc/build/changelog/unreleased_14/9590.rst new file mode 100644 index 00000000000..472cfc70e8d --- /dev/null +++ b/doc/build/changelog/unreleased_14/9590.rst @@ -0,0 +1,10 @@ +.. change:: + :tags: bug, orm + :tickets: 9590 + :versions: 2.0.9 + + Fixed endless loop which could occur when using "relationship to aliased + class" feature and also indicating a recursive eager loader such as + ``lazy="selectinload"`` in the loader, in combination with another eager + loader on the opposite side. The check for cycles has been fixed to include + aliased class relationships. 
diff --git a/lib/sqlalchemy/orm/path_registry.py b/lib/sqlalchemy/orm/path_registry.py index 4deb96b1f63..dad1fd46c05 100644 --- a/lib/sqlalchemy/orm/path_registry.py +++ b/lib/sqlalchemy/orm/path_registry.py @@ -120,7 +120,7 @@ def pairs(self): def contains_mapper(self, mapper): for path_mapper in [self.path[i] for i in range(0, len(self.path), 2)]: - if path_mapper.is_mapper and path_mapper.isa(mapper): + if path_mapper.mapper.isa(mapper): return True else: return False diff --git a/test/orm/test_ac_relationships.py b/test/orm/test_ac_relationships.py index f59d704f3f2..57e2b25e927 100644 --- a/test/orm/test_ac_relationships.py +++ b/test/orm/test_ac_relationships.py @@ -14,6 +14,7 @@ from sqlalchemy.orm import selectinload from sqlalchemy.orm import Session from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_warnings from sqlalchemy.testing import fixtures from sqlalchemy.testing.assertions import expect_raises_message from sqlalchemy.testing.assertsql import CompiledSQL @@ -331,3 +332,65 @@ def test_join(self): "FROM a JOIN (b JOIN d ON d.b_id = b.id " "JOIN c ON c.id = d.c_id) ON a.b_id = b.id", ) + + +class StructuralEagerLoadCycleTest(fixtures.DeclarativeMappedTest): + @classmethod + def setup_classes(cls): + Base = cls.DeclarativeBasic + + class A(Base): + __tablename__ = "a" + id = Column(Integer, primary_key=True) + + bs = relationship(lambda: B, back_populates="a") + + class B(Base): + __tablename__ = "b" + id = Column(Integer, primary_key=True) + a_id = Column(ForeignKey("a.id")) + + a = relationship(A, lazy="joined", back_populates="bs") + + partitioned_b = aliased(B) + + A.partitioned_bs = relationship( + partitioned_b, lazy="selectin", viewonly=True + ) + + @classmethod + def insert_data(cls, connection): + A, B = cls.classes("A", "B") + + s = Session(connection) + a = A() + a.bs = [B() for _ in range(5)] + s.add(a) + + s.commit() + + @testing.variation("ensure_no_warning", [True, False]) + def test_no_endless_loop(self, 
ensure_no_warning): + """test #9590""" + + A = self.classes.A + + sess = fixture_session() + + results = sess.scalars(select(A)) + + # the correct behavior is 1. no warnings and 2. no endless loop. + # however when the failure mode is occurring, it correctly warns, + # but then we don't get to see the endless loop happen. + # so test it both ways even though when things are "working", there's + # no problem + if ensure_no_warning: + + a = results.first() + else: + with expect_warnings( + "Loader depth for query is excessively deep", assert_=False + ): + a = results.first() + + a.bs From e45cbb296cd1f158b7627999dc46b1fca798b829 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 17 Apr 2023 08:22:49 -0400 Subject: [PATCH 528/632] clarify get_isolation_level() excludes AUTOCOMMIT I thought this was documented but apparently not. Fixes: #9658 Change-Id: I93fad12c159c599ffdbab1aff586b49e8c92a6e4 (cherry picked from commit f7bfa04bcae1e9bafa19a5ee03aaa0beac532294) --- lib/sqlalchemy/engine/base.py | 65 +++++++++++++++++++---------------- 1 file changed, 35 insertions(+), 30 deletions(-) diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index d08e3eb7d0d..084ca8d7565 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -338,7 +338,7 @@ def execution_options(self, **opt): - set per :class:`_engine.Engine` isolation level :meth:`_engine.Connection.get_isolation_level` - - view current level + - view current actual level :ref:`SQLite Transaction Isolation ` @@ -543,22 +543,29 @@ def connection(self): return self._dbapi_connection def get_isolation_level(self): - """Return the current isolation level assigned to this - :class:`_engine.Connection`. - - This will typically be the default isolation level as determined - by the dialect, unless if the - :paramref:`.Connection.execution_options.isolation_level` - feature has been used to alter the isolation level on a - per-:class:`_engine.Connection` basis. 
- - This attribute will typically perform a live SQL operation in order - to procure the current isolation level, so the value returned is the - actual level on the underlying DBAPI connection regardless of how - this state was set. Compare to the - :attr:`_engine.Connection.default_isolation_level` accessor - which returns the dialect-level setting without performing a SQL - query. + """Return the current **actual** isolation level that's present on + the database within the scope of this connection. + + This attribute will perform a live SQL operation against the database + in order to procure the current isolation level, so the value returned + is the actual level on the underlying DBAPI connection regardless of + how this state was set. This will be one of the four actual isolation + modes ``READ UNCOMMITTED``, ``READ COMMITTED``, ``REPEATABLE READ``, + ``SERIALIZABLE``. It will **not** include the ``AUTOCOMMIT`` isolation + level setting. Third party dialects may also feature additional + isolation level settings. + + .. note:: This method **will not report** on the ``AUTOCOMMIT`` + isolation level, which is a separate :term:`dbapi` setting that's + independent of **actual** isolation level. When ``AUTOCOMMIT`` is + in use, the database connection still has a "traditional" isolation + mode in effect, that is typically one of the four values + ``READ UNCOMMITTED``, ``READ COMMITTED``, ``REPEATABLE READ``, + ``SERIALIZABLE``. + + Compare to the :attr:`_engine.Connection.default_isolation_level` + accessor which returns the isolation level that is present on the + database at initial connection time. .. versionadded:: 0.9.9 @@ -581,27 +588,25 @@ def get_isolation_level(self): @property def default_isolation_level(self): - """The default isolation level assigned to this - :class:`_engine.Connection`. + """The initial-connection time isolation level associated with the + :class:`_engine.Dialect` in use. 
- This is the isolation level setting that the - :class:`_engine.Connection` - has when first procured via the :meth:`_engine.Engine.connect` method. - This level stays in place until the - :paramref:`.Connection.execution_options.isolation_level` is used - to change the setting on a per-:class:`_engine.Connection` basis. + This value is independent of the + :paramref:`.Connection.execution_options.isolation_level` and + :paramref:`.Engine.execution_options.isolation_level` execution + options, and is determined by the :class:`_engine.Dialect` when the + first connection is created, by performing a SQL query against the + database for the current isolation level before any additional commands + have been emitted. - Unlike :meth:`_engine.Connection.get_isolation_level`, - this attribute is set - ahead of time from the first connection procured by the dialect, - so SQL query is not invoked when this accessor is called. + Calling this accessor does not invoke any new SQL queries. .. versionadded:: 0.9.9 .. seealso:: :meth:`_engine.Connection.get_isolation_level` - - view current level + - view current actual isolation level :paramref:`_sa.create_engine.isolation_level` - set per :class:`_engine.Engine` isolation level From d519bca7f953a0520cda79504dbc019e74e87b28 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 17 Apr 2023 10:16:35 -0400 Subject: [PATCH 529/632] dont assume _compile_options are present Fixed bug where various ORM-specific getters such as :attr:`.ORMExecuteState.is_column_load`, :attr:`.ORMExecuteState.is_relationship_load`, :attr:`.ORMExecuteState.loader_strategy_path` etc. would throw an ``AttributeError`` if the SQL statement itself were a "compound select" such as a UNION. 
Fixes: #9634 Change-Id: Ia37df5d6f89d6534d69237dcab294bd849ece28b (cherry picked from commit 89608ccd3f5e5796d578e9a39201f7c5c45a61fe) --- doc/build/changelog/unreleased_14/9634.rst | 11 ++++++++ lib/sqlalchemy/orm/session.py | 5 +++- test/orm/test_events.py | 32 ++++++++++++++++++++++ 3 files changed, 47 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/9634.rst diff --git a/doc/build/changelog/unreleased_14/9634.rst b/doc/build/changelog/unreleased_14/9634.rst new file mode 100644 index 00000000000..664e85716be --- /dev/null +++ b/doc/build/changelog/unreleased_14/9634.rst @@ -0,0 +1,11 @@ +.. change:: + :tags: bug, orm + :tickets: 9634 + :versions: 2.0.10 + + Fixed bug where various ORM-specific getters such as + :attr:`.ORMExecuteState.is_column_load`, + :attr:`.ORMExecuteState.is_relationship_load`, + :attr:`.ORMExecuteState.loader_strategy_path` etc. would throw an + ``AttributeError`` if the SQL statement itself were a "compound select" + such as a UNION. 
diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index 753d1ec5f93..5a7a8bb211f 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -347,7 +347,10 @@ def update_execution_options(self, **opts): def _orm_compile_options(self): if not self.is_select: return None - opts = self.statement._compile_options + try: + opts = self.statement._compile_options + except AttributeError: + return None if opts.isinstance(context.ORMCompileState.default_compile_options): return opts else: diff --git a/test/orm/test_events.py b/test/orm/test_events.py index efb39bd2fdc..052b9e01637 100644 --- a/test/orm/test_events.py +++ b/test/orm/test_events.py @@ -8,6 +8,7 @@ from sqlalchemy import select from sqlalchemy import String from sqlalchemy import testing +from sqlalchemy import text from sqlalchemy import update from sqlalchemy.orm import attributes from sqlalchemy.orm import class_mapper @@ -292,6 +293,37 @@ def do_orm_execute(ctx): return canary + @testing.combinations( + (lambda: select(1), True), + (lambda User: select(User).union(select(User)), True), + (lambda: text("select * from users"), False), + ) + def test_non_orm_statements(self, stmt, is_select): + sess = Session(testing.db, future=True) + + canary = self._flag_fixture(sess) + + User, Address = self.classes("User", "Address") + stmt = testing.resolve_lambda(stmt, User=User) + sess.execute(stmt).all() + + eq_( + canary.mock_calls, + [ + call.options( + bind_mapper=None, + all_mappers=[], + is_select=is_select, + is_update=False, + is_delete=False, + is_orm_statement=False, + is_relationship_load=False, + is_column_load=False, + lazy_loaded_from=None, + ) + ], + ) + def test_all_mappers_accessor_one(self): User, Address = self.classes("User", "Address") From 6abb35d329aab9b8f27326d223cb9ac935c133e1 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 27 Apr 2023 09:49:07 -0400 Subject: [PATCH 530/632] use a lot more random names very small number of tiny names 
generated by random_names() could cause _ordered_name_fixture() to run out of names. Fixes: #9706 Change-Id: I3df00c9cf99e76fe82eb535c7fe589b73b10cd67 (cherry picked from commit 1329037bfed428e458547824a861ce1aa9df0c78) --- test/orm/test_inspect.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/test/orm/test_inspect.py b/test/orm/test_inspect.py index 3cc7640cf03..0ffc9e86e01 100644 --- a/test/orm/test_inspect.py +++ b/test/orm/test_inspect.py @@ -440,14 +440,16 @@ def _random_names(self): import random import keyword - names = { - "".join( - random.choice("abcdegfghijklmnopqrstuvwxyz") - for i in range(random.randint(3, 15)) - ) - for j in range(random.randint(4, 12)) - } - return list(names.difference(keyword.kwlist)) + def _random_name(): + while True: + name = "".join( + random.choice("abcdegfghijklmnopqrstuvwxyz") + for i in range(random.randint(5, 15)) + ) + if name not in keyword.kwlist: + return name + + return [_random_name() for i in range(random.randint(8, 15))] def _ordered_name_fixture(self, glbls, clsname, base, supercls): import random From 13752f00f30f369de8a51859e78d79b06601762c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 30 Apr 2023 13:56:33 -0400 Subject: [PATCH 531/632] do not allow non-cache-key entity objects in annotations Fixed critical caching issue where combination of :func:`_orm.aliased()` :func:`_sql.case` and :func:`_hybrid.hybrid_property` expressions would cause a cache key mismatch, leading to cache keys that held onto the actual :func:`_orm.aliased` object while also not matching each other, filling up the cache. 
Fixes: #9728 Change-Id: I700645b5629a81a0104cf923db72a7421fa43ff4 (cherry picked from commit 4d69d83530666f9aaf3fb327d8c63110ef5e7ff5) --- doc/build/changelog/unreleased_14/9728.rst | 10 +++++ lib/sqlalchemy/orm/attributes.py | 31 ++++++++++--- test/orm/test_cache_key.py | 51 ++++++++++++++++++++++ 3 files changed, 85 insertions(+), 7 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/9728.rst diff --git a/doc/build/changelog/unreleased_14/9728.rst b/doc/build/changelog/unreleased_14/9728.rst new file mode 100644 index 00000000000..a8bced33bcd --- /dev/null +++ b/doc/build/changelog/unreleased_14/9728.rst @@ -0,0 +1,10 @@ +.. change:: + :tags: bug, orm + :tickets: 9728 + :versions: 2.0.12 + + Fixed critical caching issue where combination of :func:`_orm.aliased()` + :func:`_sql.case` and :func:`_hybrid.hybrid_property` expressions would + cause a cache key mismatch, leading to cache keys that held onto the actual + :func:`_orm.aliased` object while also not matching each other, filling up + the cache. diff --git a/lib/sqlalchemy/orm/attributes.py b/lib/sqlalchemy/orm/attributes.py index c6be3e6d0cf..2e82851a23f 100644 --- a/lib/sqlalchemy/orm/attributes.py +++ b/lib/sqlalchemy/orm/attributes.py @@ -54,6 +54,8 @@ from ..sql import roles from ..sql import traversals from ..sql import visitors +from ..sql.traversals import HasCacheKey +from ..sql.visitors import InternalTraversal class NoKey(str): @@ -223,13 +225,16 @@ def expression(self): subclass representing a column expression. 
""" + entity_namespace = self._entity_namespace + assert isinstance(entity_namespace, HasCacheKey) + if self.key is NO_KEY: - annotations = {"entity_namespace": self._entity_namespace} + annotations = {"entity_namespace": entity_namespace} else: annotations = { "proxy_key": self.key, "proxy_owner": self._parententity, - "entity_namespace": self._entity_namespace, + "entity_namespace": entity_namespace, } ce = self.comparator.__clause_element__() @@ -482,10 +487,22 @@ def __get__(self, instance, owner): return self.impl.get(state, dict_) -HasEntityNamespace = util.namedtuple( - "HasEntityNamespace", ["entity_namespace"] -) -HasEntityNamespace.is_mapper = HasEntityNamespace.is_aliased_class = False +class HasEntityNamespace(HasCacheKey): + __slots__ = ("_entity_namespace",) + + is_mapper = False + is_aliased_class = False + + _traverse_internals = [ + ("_entity_namespace", InternalTraversal.dp_has_cache_key), + ] + + def __init__(self, ent): + self._entity_namespace = ent + + @property + def entity_namespace(self): + return self._entity_namespace.entity_namespace def create_proxied_attribute(descriptor): @@ -550,7 +567,7 @@ def _entity_namespace(self): else: # used by hybrid attributes which try to remain # agnostic of any ORM concepts like mappers - return HasEntityNamespace(self.class_) + return HasEntityNamespace(self._parententity) @property def property(self): diff --git a/test/orm/test_cache_key.py b/test/orm/test_cache_key.py index 6720baf024f..93d980e00a5 100644 --- a/test/orm/test_cache_key.py +++ b/test/orm/test_cache_key.py @@ -2,6 +2,7 @@ import sqlalchemy as sa from sqlalchemy import Column +from sqlalchemy import column from sqlalchemy import func from sqlalchemy import inspect from sqlalchemy import Integer @@ -16,6 +17,7 @@ from sqlalchemy import update from sqlalchemy import util from sqlalchemy.ext.declarative import ConcreteBase +from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.orm import aliased from sqlalchemy.orm import Bundle 
from sqlalchemy.orm import defaultload @@ -785,6 +787,55 @@ def three(): compare_values=True, ) + @testing.variation( + "exprtype", ["plain_column", "self_standing_case", "case_w_columns"] + ) + def test_hybrid_w_case_ac(self, decl_base, exprtype): + """test #9728""" + + class Employees(decl_base): + __tablename__ = "employees" + id = Column(String(128), primary_key=True) + first_name = Column(String(length=64)) + + @hybrid_property + def name(self): + return self.first_name + + @name.expression + def name( + cls, + ): + if exprtype.plain_column: + return cls.first_name + elif exprtype.self_standing_case: + return case( + (column("x") == 1, column("q")), + else_=column("q"), + ) + elif exprtype.case_w_columns: + return case( + (column("x") == 1, column("q")), + else_=cls.first_name, + ) + else: + exprtype.fail() + + def go1(): + employees_2 = aliased(Employees, name="employees_2") + stmt = select(employees_2.name) + return stmt + + def go2(): + employees_2 = aliased(Employees, name="employees_2") + stmt = select(employees_2) + return stmt + + self._run_cache_key_fixture( + lambda: stmt_20(go1(), go2()), + compare_values=True, + ) + class RoundTripTest(QueryTest, AssertsCompiledSQL): __dialect__ = "default" From dd580c6cb7df7b5cc297bfa8667e1ac3d32563e5 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 30 Apr 2023 17:22:44 -0400 Subject: [PATCH 532/632] adjust verbiage Change-Id: Icc8f201d63e4cc2e7df2f42acb28a6dc84a58342 (cherry picked from commit 623044bb54a58eaa25ed239b7646396548f4b1e7) --- doc/build/changelog/unreleased_14/9728.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/build/changelog/unreleased_14/9728.rst b/doc/build/changelog/unreleased_14/9728.rst index a8bced33bcd..c5908dbad61 100644 --- a/doc/build/changelog/unreleased_14/9728.rst +++ b/doc/build/changelog/unreleased_14/9728.rst @@ -3,8 +3,8 @@ :tickets: 9728 :versions: 2.0.12 - Fixed critical caching issue where combination of :func:`_orm.aliased()` - 
:func:`_sql.case` and :func:`_hybrid.hybrid_property` expressions would - cause a cache key mismatch, leading to cache keys that held onto the actual - :func:`_orm.aliased` object while also not matching each other, filling up - the cache. + Fixed critical caching issue where the combination of + :func:`_orm.aliased()` and :func:`_hybrid.hybrid_property` expression + compositions would cause a cache key mismatch, leading to cache keys that + held onto the actual :func:`_orm.aliased` object while also not matching + that of equivalent constructs, filling up the cache. From b71d50730367493c17473019c7c4c791c9be84d5 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 30 Apr 2023 17:24:32 -0400 Subject: [PATCH 533/632] - 1.4.48 --- doc/build/changelog/changelog_14.rst | 36 +++++++++++++++++++++- doc/build/changelog/unreleased_14/9590.rst | 10 ------ doc/build/changelog/unreleased_14/9634.rst | 11 ------- doc/build/changelog/unreleased_14/9728.rst | 10 ------ doc/build/conf.py | 4 +-- 5 files changed, 37 insertions(+), 34 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/9590.rst delete mode 100644 doc/build/changelog/unreleased_14/9634.rst delete mode 100644 doc/build/changelog/unreleased_14/9728.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 49ad3030488..d11f26340c4 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,41 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.48 - :include_notes_from: unreleased_14 + :released: April 30, 2023 + + .. 
change:: + :tags: bug, orm + :tickets: 9728 + :versions: 2.0.12 + + Fixed critical caching issue where the combination of + :func:`_orm.aliased()` and :func:`_hybrid.hybrid_property` expression + compositions would cause a cache key mismatch, leading to cache keys that + held onto the actual :func:`_orm.aliased` object while also not matching + that of equivalent constructs, filling up the cache. + + .. change:: + :tags: bug, orm + :tickets: 9634 + :versions: 2.0.10 + + Fixed bug where various ORM-specific getters such as + :attr:`.ORMExecuteState.is_column_load`, + :attr:`.ORMExecuteState.is_relationship_load`, + :attr:`.ORMExecuteState.loader_strategy_path` etc. would throw an + ``AttributeError`` if the SQL statement itself were a "compound select" + such as a UNION. + + .. change:: + :tags: bug, orm + :tickets: 9590 + :versions: 2.0.9 + + Fixed endless loop which could occur when using "relationship to aliased + class" feature and also indicating a recursive eager loader such as + ``lazy="selectinload"`` in the loader, in combination with another eager + loader on the opposite side. The check for cycles has been fixed to include + aliased class relationships. .. changelog:: :version: 1.4.47 diff --git a/doc/build/changelog/unreleased_14/9590.rst b/doc/build/changelog/unreleased_14/9590.rst deleted file mode 100644 index 472cfc70e8d..00000000000 --- a/doc/build/changelog/unreleased_14/9590.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 9590 - :versions: 2.0.9 - - Fixed endless loop which could occur when using "relationship to aliased - class" feature and also indicating a recursive eager loader such as - ``lazy="selectinload"`` in the loader, in combination with another eager - loader on the opposite side. The check for cycles has been fixed to include - aliased class relationships. 
diff --git a/doc/build/changelog/unreleased_14/9634.rst b/doc/build/changelog/unreleased_14/9634.rst deleted file mode 100644 index 664e85716be..00000000000 --- a/doc/build/changelog/unreleased_14/9634.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 9634 - :versions: 2.0.10 - - Fixed bug where various ORM-specific getters such as - :attr:`.ORMExecuteState.is_column_load`, - :attr:`.ORMExecuteState.is_relationship_load`, - :attr:`.ORMExecuteState.loader_strategy_path` etc. would throw an - ``AttributeError`` if the SQL statement itself were a "compound select" - such as a UNION. diff --git a/doc/build/changelog/unreleased_14/9728.rst b/doc/build/changelog/unreleased_14/9728.rst deleted file mode 100644 index c5908dbad61..00000000000 --- a/doc/build/changelog/unreleased_14/9728.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 9728 - :versions: 2.0.12 - - Fixed critical caching issue where the combination of - :func:`_orm.aliased()` and :func:`_hybrid.hybrid_property` expression - compositions would cause a cache key mismatch, leading to cache keys that - held onto the actual :func:`_orm.aliased` object while also not matching - that of equivalent constructs, filling up the cache. diff --git a/doc/build/conf.py b/doc/build/conf.py index 9da54538dc4..872a7d4086d 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -219,9 +219,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. 
-release = "1.4.47" +release = "1.4.48" -release_date = "March 18, 2023" +release_date = "April 30, 2023" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From cc7298e406b46c659d474152a9c04ba040dbffcf Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 30 Apr 2023 17:33:17 -0400 Subject: [PATCH 534/632] Version 1.4.49 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index d11f26340c4..ab9552ec7f5 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.49 + :include_notes_from: unreleased_14 + .. changelog:: :version: 1.4.48 :released: April 30, 2023 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index 54be07f29d6..e6c51677ff1 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.48" +__version__ = "1.4.49" def __go(lcls): From 6f723defa45effb9767d2eae80893502baf76194 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 10 May 2023 10:08:47 -0400 Subject: [PATCH 535/632] note future row change for session.execute() Fixes: #9761 Change-Id: I555e822b092d047badab86b3a365380121968592 (cherry picked from commit 987285fb4b13c39bcc6b8922e618d9e830577dda) --- doc/build/changelog/migration_14.rst | 49 ++++++++++++++++++++++++++-- 1 file changed, 47 insertions(+), 2 deletions(-) diff --git a/doc/build/changelog/migration_14.rst b/doc/build/changelog/migration_14.rst index 85c8c1d3f30..023550d8612 100644 --- a/doc/build/changelog/migration_14.rst +++ b/doc/build/changelog/migration_14.rst @@ -69,7 +69,6 @@ be encouraged to move to :term:`2.0 style` 
execution which allows Core construct to be used freely against ORM entities:: with Session(engine, future=True) as sess: - stmt = ( select(User) .where(User.name == "sandy") @@ -105,7 +104,8 @@ Things to note about the above example: * Statements that work with ORM entities and are expected to return ORM results are invoked using :meth:`.orm.Session.execute`. See - :ref:`session_querying_20` for a primer. + :ref:`session_querying_20` for a primer. See also the following note + at :ref:`change_session_execute_result`. * a :class:`_engine.Result` object is returned, rather than a plain list, which itself is a much more sophisticated version of the previous ``ResultProxy`` @@ -153,6 +153,49 @@ for some examples). :ticket:`5159` + +.. _change_session_execute_result: + +ORM ``Session.execute()`` uses "future" style ``Result`` sets in all cases +-------------------------------------------------------------------------- + +As noted in :ref:`change_4710_core`, the :class:`_engine.Result` and +:class:`_engine.Row` objects now feature "named tuple" behavior, when used with +an :class:`_engine.Engine` that includes the +:paramref:`_sa.create_engine.future` parameter set to ``True``. These +"named tuple" rows in particular include a behavioral change which is that +Python containment expressions using ``in``, such as:: + + >>> engine = create_engine("...", future=True) + >>> conn = engine.connect() + >>> row = conn.execute.first() + >>> "name" in row + True + +The above containment test will +use **value containment**, not **key containment**; the ``row`` would need to +have a **value** of "name" to return ``True``. 
+ +Under SQLAlchemy 1.4, when :paramref:`_sa.create_engine.future` parameter set +to ``False``, legacy-style ``LegacyRow`` objects are returned which feature the +partial-named-tuple behavior of prior SQLAlchemy versions, where containment +checks continue to use key containment; ``"name" in row`` would return +True if the row had a **column** named "name", rather than a value. + +When using :meth:`_orm.Session.execute`, full named-tuple style is enabled +**unconditionally**, meaning ``"name" in row`` will use **value containment** +as the test, and **not** key containment. This is to accommodate that +:meth:`_orm.Session.execute` now returns a :class:`_engine.Result` that also +accommodates for ORM results, where even legacy ORM result rows such as those +returned by :meth:`_orm.Query.all` use value containment. + +This is a behavioral change from SQLAlchemy 1.3 to 1.4. To continue receiving +key-containment collections, use the :meth:`_engine.Result.mappings` method to +receive a :class:`_engine.MappingResult` that returns rows as dictionaries:: + + for dict_row in session.execute(text("select id from table")).mappings(): + assert "id" in dict_row + .. _change_4639: Transparent SQL Compilation Caching added to All DQL, DML Statements in Core, ORM @@ -1477,6 +1520,8 @@ There are many reasons why the above assumptions do not hold: :ref:`change_4710_orm` + :ref:`change_session_execute_result` + :ticket:`4710` .. _change_4753: From 04380021c8e52787dd327129f3c66e11f737904d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 12 May 2023 22:30:40 -0400 Subject: [PATCH 536/632] qualify session.execute() resulting in autoflush maybe this was planned differently at some point but session.execute() only autoflushes for ORM contexts. 
Change-Id: Ia10af232248e321875f79d5bde71f64d3dc25177 References: #9776 (cherry picked from commit eb286c15f096771dbb128acbe8fe03e94aa72f6a) --- doc/build/orm/session_basics.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/build/orm/session_basics.rst b/doc/build/orm/session_basics.rst index fcf384d4a2a..41390ff0bc4 100644 --- a/doc/build/orm/session_basics.rst +++ b/doc/build/orm/session_basics.rst @@ -447,7 +447,9 @@ The flush which occurs automatically within the scope of certain methods is known as **autoflush**. Autoflush is defined as a configurable, automatic flush call which occurs at the beginning of methods including: -* :meth:`_orm.Session.execute` and other SQL-executing methods +* :meth:`_orm.Session.execute` and other SQL-executing methods, when used + against ORM-enabled SQL constructs, such as :func:`_sql.select` objects + that refer to ORM entities and/or ORM-mapped attributes * When a :class:`_query.Query` is invoked to send SQL to the database * Within the :meth:`.Session.merge` method before querying the database * When objects are :ref:`refreshed ` From 8e465ce2788a726c614c060c11ae77b05d0b99af Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 19 May 2023 09:50:09 -0400 Subject: [PATCH 537/632] clarify exec_driver_sql this docstring wasn't really saying what this method was for. Change-Id: I2d83ed2690c93d215faadaf7c6edcb02c6c57a6f (cherry picked from commit 75d716c7799e28ba5c86c72db08025dd6fc8c6e0) --- lib/sqlalchemy/engine/base.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index 084ca8d7565..53916cc1233 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -1712,8 +1712,11 @@ def _execute_20( def exec_driver_sql( self, statement, parameters=None, execution_options=None ): - r"""Executes a SQL statement construct and returns a - :class:`_engine.CursorResult`. 
+ r"""Executes a string SQL statement on the DBAPI cursor directly, + without any SQL compilation steps. + + This can be used to pass any string directly to the + ``cursor.execute()`` method of the DBAPI in use. :param statement: The statement str to be executed. Bound parameters must use the underlying DBAPI's paramstyle, such as "qmark", @@ -1724,6 +1727,8 @@ def exec_driver_sql( a tuple of positional parameters, or a list containing either dictionaries or tuples for multiple-execute support. + :return: a :class:`_engine.CursorResult`. + E.g. multiple dictionaries:: From b39af62d10f7244f99635619ddb29a45359ace92 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 20 Jun 2023 15:24:38 -0400 Subject: [PATCH 538/632] updates for mypy 1.4 mypy 1.4 is reporting new style types list[], tuple[], etc. as well as "x | None" for optional. they also added one argument for format_type(). This is for 1.4 backport as well Change-Id: I68084199858e9da901641d6036780437bcf5f2d6 (cherry picked from commit f79d09221b1ec6cd6bc8d83d6e947db5f75c6d1c) --- doc/build/changelog/unreleased_14/mypy14.rst | 5 +++++ lib/sqlalchemy/ext/mypy/infer.py | 5 ++--- lib/sqlalchemy/ext/mypy/util.py | 15 +++++++++++++++ test/ext/mypy/test_mypy_plugin_py3k.py | 19 +++++++++++++++++++ 4 files changed, 41 insertions(+), 3 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/mypy14.rst diff --git a/doc/build/changelog/unreleased_14/mypy14.rst b/doc/build/changelog/unreleased_14/mypy14.rst new file mode 100644 index 00000000000..97c37514f8a --- /dev/null +++ b/doc/build/changelog/unreleased_14/mypy14.rst @@ -0,0 +1,5 @@ +.. change:: + :tags: bug, ext + :versions: 2.0.17 + + Fixed issue in mypy plugin for use with mypy 1.4. 
diff --git a/lib/sqlalchemy/ext/mypy/infer.py b/lib/sqlalchemy/ext/mypy/infer.py index f88a960bd2e..f3f44a42504 100644 --- a/lib/sqlalchemy/ext/mypy/infer.py +++ b/lib/sqlalchemy/ext/mypy/infer.py @@ -9,7 +9,6 @@ from typing import Sequence from mypy.maptype import map_instance_to_supertype -from mypy.messages import format_type from mypy.nodes import AssignmentStmt from mypy.nodes import CallExpr from mypy.nodes import Expression @@ -454,8 +453,8 @@ def _infer_type_from_left_and_inferred_right( api, msg.format( node.name, - format_type(orig_left_hand_type), - format_type(effective_type), + util.format_type(orig_left_hand_type, api.options), + util.format_type(effective_type, api.options), ), node, ) diff --git a/lib/sqlalchemy/ext/mypy/util.py b/lib/sqlalchemy/ext/mypy/util.py index 16b365e1eee..373fd4bfbc4 100644 --- a/lib/sqlalchemy/ext/mypy/util.py +++ b/lib/sqlalchemy/ext/mypy/util.py @@ -10,6 +10,8 @@ from typing import TypeVar from typing import Union +from mypy import version +from mypy.messages import format_type as _mypy_format_type from mypy.nodes import ARG_POS from mypy.nodes import CallExpr from mypy.nodes import ClassDef @@ -23,6 +25,7 @@ from mypy.nodes import Statement from mypy.nodes import SymbolTableNode from mypy.nodes import TypeInfo +from mypy.options import Options from mypy.plugin import ClassDefContext from mypy.plugin import DynamicClassDefContext from mypy.plugin import SemanticAnalyzerPluginInterface @@ -35,6 +38,11 @@ from mypy.types import UnboundType from mypy.types import UnionType +_vers = tuple( + [int(x) for x in version.__version__.split(".") if re.match(r"^\d+$", x)] +) +mypy_14 = _vers >= (1, 4) + _TArgType = TypeVar("_TArgType", bound=Union[CallExpr, NameExpr]) @@ -151,6 +159,13 @@ def get_mapped_attributes( return attributes +def format_type(typ_: Type, options: Options) -> str: + if mypy_14: + return _mypy_format_type(typ_, options) # type: ignore + else: + return _mypy_format_type(typ_) # type: ignore + + def 
set_mapped_attributes( info: TypeInfo, attributes: List[SQLAlchemyAttribute] ) -> None: diff --git a/test/ext/mypy/test_mypy_plugin_py3k.py b/test/ext/mypy/test_mypy_plugin_py3k.py index cb04d1c739a..be80043ca88 100644 --- a/test/ext/mypy/test_mypy_plugin_py3k.py +++ b/test/ext/mypy/test_mypy_plugin_py3k.py @@ -172,6 +172,9 @@ def test_mypy(self, mypy_runner, path): expected_errors = [] expected_re = re.compile(r"\s*# EXPECTED(_MYPY)?: (.+)") py_ver_re = re.compile(r"^#\s*PYTHON_VERSION\s?>=\s?(\d+\.\d+)") + + from sqlalchemy.ext.mypy.util import mypy_14 + with open(path) as file_: for num, line in enumerate(file_, 1): m = py_ver_re.match(line) @@ -191,6 +194,22 @@ def test_mypy(self, mypy_runner, path): is_mypy = bool(m.group(1)) expected_msg = m.group(2) expected_msg = re.sub(r"# noqa[:]? ?.*", "", m.group(2)) + + if mypy_14: + # skip first character which could be capitalized + # "List item x not found" type of message + expected_msg = expected_msg[0] + re.sub( + r"\b(List|Tuple|Dict|Set|Type)\b", + lambda m: m.group(1).lower(), + expected_msg[1:], + ) + + expected_msg = re.sub( + r"Optional\[(.*?)\]", + lambda m: f"{m.group(1)} | None", + expected_msg, + ) + expected_errors.append( (num, is_mypy, expected_msg.strip()) ) From 59e80b945fc8be3316307e0e004f6133dcef10c2 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 21 Jun 2023 14:59:21 -0400 Subject: [PATCH 539/632] qualify mypy1.4 update for python 3.9, 3.10 + in I68084199858e9da901641d6036780437bcf5f2d6 we added a mypy1.4 check to check for new-style type messages. mypy only does lowercase types on python 3.9 and above, OR syntax on 3.10 and above. 
qualify these both Change-Id: Ic1ee12927ae02c1936d1c2905db28b587c7fece7 (cherry picked from commit cb39c0109ef5167de3a7a682cc553480172dac82) --- test/ext/mypy/test_mypy_plugin_py3k.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/test/ext/mypy/test_mypy_plugin_py3k.py b/test/ext/mypy/test_mypy_plugin_py3k.py index be80043ca88..a92aee1e712 100644 --- a/test/ext/mypy/test_mypy_plugin_py3k.py +++ b/test/ext/mypy/test_mypy_plugin_py3k.py @@ -5,6 +5,7 @@ import tempfile from sqlalchemy import testing +from sqlalchemy import util from sqlalchemy.testing import config from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures @@ -195,7 +196,10 @@ def test_mypy(self, mypy_runner, path): expected_msg = m.group(2) expected_msg = re.sub(r"# noqa[:]? ?.*", "", m.group(2)) - if mypy_14: + if mypy_14 and util.py39: + # use_lowercase_names, py39 and above + # https://github.com/python/mypy/blob/304997bfb85200fb521ac727ee0ce3e6085e5278/mypy/options.py#L363 # noqa: E501 + # skip first character which could be capitalized # "List item x not found" type of message expected_msg = expected_msg[0] + re.sub( @@ -204,6 +208,9 @@ def test_mypy(self, mypy_runner, path): expected_msg[1:], ) + if mypy_14 and util.py310: + # use_or_syntax, py310 and above + # https://github.com/python/mypy/blob/304997bfb85200fb521ac727ee0ce3e6085e5278/mypy/options.py#L368 # noqa: E501 expected_msg = re.sub( r"Optional\[(.*?)\]", lambda m: f"{m.group(1)} | None", From cd56e873e1db4e6c8bee9e035627beba80251bea Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Mon, 22 May 2023 21:54:47 +0200 Subject: [PATCH 540/632] add 3.12 Initial fixes to test to accommodate py312 this is a backport of 59521abcc0676e936b31a523bd968fc157fef0c2 however includes greenlet>=3.0.0a1 that now builds and/or installs on Python 3.12. result row handling is also different in 1.4. 
Fixes: #9819 Change-Id: I91a51dcbad2902f7c4c7cec88ebbf42c2417b512 (cherry picked from commit 59521abcc0676e936b31a523bd968fc157fef0c2) --- doc/build/changelog/unreleased_20/py312.rst | 4 +++ examples/versioned_history/history_meta.py | 2 +- lib/sqlalchemy/cextension/resultproxy.c | 8 ++++- lib/sqlalchemy/engine/row.py | 5 ++- lib/sqlalchemy/sql/sqltypes.py | 7 +++- lib/sqlalchemy/testing/warnings.py | 6 ++++ test/base/test_result.py | 9 +++++- test/engine/test_pool.py | 11 +++++-- test/engine/test_reconnect.py | 36 ++++++++++----------- tox.ini | 11 ++++++- 10 files changed, 71 insertions(+), 28 deletions(-) create mode 100644 doc/build/changelog/unreleased_20/py312.rst diff --git a/doc/build/changelog/unreleased_20/py312.rst b/doc/build/changelog/unreleased_20/py312.rst new file mode 100644 index 00000000000..330cebb6434 --- /dev/null +++ b/doc/build/changelog/unreleased_20/py312.rst @@ -0,0 +1,4 @@ +.. change:: + :tags: installation + + Compatibility improvements to work fully with Python 3.12 diff --git a/examples/versioned_history/history_meta.py b/examples/versioned_history/history_meta.py index 7d13f2d7456..1f83cf6d4fa 100644 --- a/examples/versioned_history/history_meta.py +++ b/examples/versioned_history/history_meta.py @@ -116,7 +116,7 @@ def _col_copy(col): Column( "changed", DateTime, - default=datetime.datetime.utcnow, + default=lambda: datetime.datetime.now(datetime.timezone.utc), info=version_meta, ) ) diff --git a/lib/sqlalchemy/cextension/resultproxy.c b/lib/sqlalchemy/cextension/resultproxy.c index 9d1f0ead480..20f1536529f 100644 --- a/lib/sqlalchemy/cextension/resultproxy.c +++ b/lib/sqlalchemy/cextension/resultproxy.c @@ -21,6 +21,12 @@ typedef Py_ssize_t (*lenfunc)(PyObject *); typedef intargfunc ssizeargfunc; #endif +#if PY_VERSION_HEX > 0x030c0000 +# define PY_RAISE_SLICE_FOR_MAPPING PyExc_KeyError +#else +# define PY_RAISE_SLICE_FOR_MAPPING PyExc_TypeError +#endif + #if PY_MAJOR_VERSION < 3 // new typedef in Python 3 @@ -369,7 +375,7 @@ 
BaseRow_getitem_by_object(BaseRow *self, PyObject *key, int asmapping) if (record == NULL) { if (PySlice_Check(key)) { - PyErr_Format(PyExc_TypeError, "can't use slices for mapping access"); + PyErr_Format(PY_RAISE_SLICE_FOR_MAPPING, "can't use slices for mapping access"); return NULL; } record = PyObject_CallMethod(self->parent, "_key_fallback", diff --git a/lib/sqlalchemy/engine/row.py b/lib/sqlalchemy/engine/row.py index f7c00bab37f..50577ffe8d7 100644 --- a/lib/sqlalchemy/engine/row.py +++ b/lib/sqlalchemy/engine/row.py @@ -130,7 +130,10 @@ def _get_by_key_impl(self, key): try: rec = self._keymap[key] except KeyError as ke: - rec = self._parent._key_fallback(key, ke) + if isinstance(key, slice): + return tuple(self._data[key]) + else: + rec = self._parent._key_fallback(key, ke) except TypeError: if isinstance(key, slice): return tuple(self._data[key]) diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index 84239c70869..eed63e5070d 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ -2058,7 +2058,12 @@ class Interval(Emulated, _AbstractInterval, TypeDecorator): """ impl = DateTime - epoch = dt.datetime.utcfromtimestamp(0) + if compat.py2k: + epoch = dt.datetime.utcfromtimestamp(0) + else: + epoch = dt.datetime.fromtimestamp(0, dt.timezone.utc).replace( + tzinfo=None + ) cache_ok = True def __init__(self, native=True, second_precision=None, day_precision=None): diff --git a/lib/sqlalchemy/testing/warnings.py b/lib/sqlalchemy/testing/warnings.py index 762b0703919..7969a73ff7b 100644 --- a/lib/sqlalchemy/testing/warnings.py +++ b/lib/sqlalchemy/testing/warnings.py @@ -46,6 +46,12 @@ def setup_filters(): warnings.filterwarnings( "error", category=DeprecationWarning, module=origin ) + warnings.filterwarnings( + "ignore", + category=DeprecationWarning, + message=r".*The default (?:date)?(?:time)?(?:stamp)? 
" + r"(adapter|converter) is deprecated", + ) # ignore things that are deprecated *as of* 2.0 :) warnings.filterwarnings( diff --git a/test/base/test_result.py b/test/base/test_result.py index c7b18fed384..86874b41df0 100644 --- a/test/base/test_result.py +++ b/test/base/test_result.py @@ -9,7 +9,9 @@ from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_false from sqlalchemy.testing import is_true +from sqlalchemy.testing.assertions import expect_raises from sqlalchemy.testing.util import picklers +from sqlalchemy.util import compat class ResultTupleTest(fixtures.TestBase): @@ -66,7 +68,12 @@ def test_slice_access(self): def test_slices_arent_in_mappings(self): keyed_tuple = self._fixture([1, 2], ["a", "b"]) - assert_raises(TypeError, lambda: keyed_tuple._mapping[0:2]) + if compat.py312: + with expect_raises(KeyError): + keyed_tuple._mapping[0:2] + else: + with expect_raises(TypeError): + keyed_tuple._mapping[0:2] def test_integers_arent_in_mappings(self): keyed_tuple = self._fixture([1, 2], ["a", "b"]) diff --git a/test/engine/test_pool.py b/test/engine/test_pool.py index 2e11002efcc..fea65fe4c4a 100644 --- a/test/engine/test_pool.py +++ b/test/engine/test_pool.py @@ -750,8 +750,8 @@ def test_invalidate_event_exception(self): assert canary.call_args_list[0][0][0] is dbapi_con assert canary.call_args_list[0][0][2] is exc - @testing.combinations((True,), (False,), argnames="is_asyncio") - @testing.combinations((True,), (False,), argnames="has_terminate") + @testing.variation("is_asyncio", [(True, testing.requires.asyncio), False]) + @testing.variation("has_terminate", [True, False]) @testing.requires.python3 def test_checkin_event_gc(self, is_asyncio, has_terminate): p, canary = self._checkin_event_fixture( @@ -1684,7 +1684,12 @@ def handle_checkout_event(dbapi_con, con_record, con_proxy): exc_cls=TimeoutThing if exc_type.base_exception else Exception, ) - @testing.combinations((True, testing.requires.python3), (False,)) + @testing.variation( + 
"detach_gced", + [("detached_gc", testing.requires.asyncio), "normal_gc"], + ) + @testing.emits_warning("The garbage collector") + @testing.requires.python3 def test_userspace_disconnectionerror_weakref_finalizer(self, detach_gced): dbapi, pool = self._queuepool_dbapi_fixture( pool_size=1, max_overflow=2, _is_asyncio=detach_gced diff --git a/test/engine/test_reconnect.py b/test/engine/test_reconnect.py index bd597a96b79..2079fbe7df9 100644 --- a/test/engine/test_reconnect.py +++ b/test/engine/test_reconnect.py @@ -1353,6 +1353,9 @@ def test_pre_ping_db_stays_shutdown(self): class InvalidateDuringResultTest(fixtures.TestBase): __backend__ = True + # test locks SQLite file databases due to unconsumed results + __requires__ = ("ad_hoc_engines",) + def setup_test(self): self.engine = engines.reconnecting_engine() self.meta = MetaData() @@ -1375,30 +1378,25 @@ def teardown_test(self): self.meta.drop_all(conn) self.engine.dispose() - @testing.crashes( - "oracle", - "cx_oracle 6 doesn't allow a close like this due to open cursors", - ) - @testing.fails_if( - [ - "+mysqlconnector", - "+mysqldb", - "+cymysql", - "+pymysql", - "+pg8000", - "+asyncpg", - "+aiosqlite", - "+aiomysql", - "+asyncmy", - ], - "Buffers the result set and doesn't check for connection close", - ) def test_invalidate_on_results(self): conn = self.engine.connect() - result = conn.exec_driver_sql("select * from sometable") + result = conn.exec_driver_sql( + "select * from sometable", + ) for x in range(20): result.fetchone() + + real_cursor = result.cursor self.engine.test_shutdown() + + def produce_side_effect(): + # will fail because connection was closed, with an exception + # that should trigger disconnect routines + real_cursor.execute("select * from sometable") + + result.cursor = Mock( + fetchone=mock.Mock(side_effect=produce_side_effect) + ) try: _assert_invalidated(result.fetchone) assert conn.invalidated diff --git a/tox.ini b/tox.ini index 1c95f068e46..092104a4485 100644 --- a/tox.ini +++ 
b/tox.ini @@ -28,7 +28,7 @@ deps= sqlite_file: .[aiosqlite] sqlite_file: .[sqlcipher]; python_version >= '3' and python_version < '3.10' postgresql: .[postgresql] - postgresql: .[postgresql_asyncpg]; python_version >= '3' + py3{,7,8,9,10,11}-postgresql: .[postgresql_asyncpg]; python_version >= '3' postgresql: .[postgresql_pg8000]; python_version >= '3' mysql: .[mysql] @@ -40,6 +40,8 @@ deps= mssql: .[mssql] + py312: greenlet>=3.0.0a1 + dbapimain-sqlite: git+https://github.com/omnilib/aiosqlite.git#egg=aiosqlite dbapimain-sqlite: git+https://github.com/coleifer/sqlcipher3.git#egg=sqlcipher3 @@ -79,11 +81,17 @@ setenv= PYTEST_COLOR={tty:--color=yes} + # pytest 'rewrite' is hitting lots of deprecation warnings under py312 and + # i can't find any way to ignore those warnings, so this turns it off + py312: PYTEST_ARGS=--assert plain + MEMUSAGE=--nomemory BASECOMMAND=python -m pytest {env:PYTEST_COLOR} --rootdir {toxinidir} --log-info=sqlalchemy.testing WORKERS={env:TOX_WORKERS:-n4 --max-worker-restart=5} + + nocext: DISABLE_SQLALCHEMY_CEXT=1 cext: REQUIRE_SQLALCHEMY_CEXT=1 cov: COVERAGE={[testenv]cov_args} @@ -103,6 +111,7 @@ setenv= postgresql: POSTGRESQL={env:TOX_POSTGRESQL:--db postgresql} py2{,7}-postgresql: POSTGRESQL={env:TOX_POSTGRESQL_PY2K:{env:TOX_POSTGRESQL:--db postgresql}} py3{,5,6,7,8,9,10,11}-postgresql: EXTRA_PG_DRIVERS={env:EXTRA_PG_DRIVERS:--dbdriver psycopg2 --dbdriver asyncpg --dbdriver pg8000} + py312-postgresql: EXTRA_PG_DRIVERS={env:EXTRA_PG_DRIVERS:--dbdriver psycopg2 --dbdriver pg8000} mysql: MYSQL={env:TOX_MYSQL:--db mysql} py2{,7}-mysql: MYSQL={env:TOX_MYSQL_PY2K:{env:TOX_MYSQL:--db mysql}} From 0afb399b4e312a6639f2da0d5f552197b158798d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 3 Jul 2023 08:09:06 -0400 Subject: [PATCH 541/632] adjust py312 fix - version detection should anticipate pyhex version on 3.12.0.0 - github actions has removed py2.7 totally - put changelog in correct place / tags - not sure what to do for py2.7 wheels for 
1.4. might have to just go with there being no py2.7 wheel on pypi Change-Id: I3f0b3fa1adc3f0c4e6bb62f31e6494eeda88ba78 --- .github/workflows/run-on-pr.yaml | 4 ---- .github/workflows/run-test.yaml | 7 ------- .../changelog/{unreleased_20 => unreleased_14}/py312.rst | 2 +- lib/sqlalchemy/cextension/resultproxy.c | 2 +- 4 files changed, 2 insertions(+), 13 deletions(-) rename doc/build/changelog/{unreleased_20 => unreleased_14}/py312.rst (72%) diff --git a/.github/workflows/run-on-pr.yaml b/.github/workflows/run-on-pr.yaml index 214c79b2c32..9a944b01391 100644 --- a/.github/workflows/run-on-pr.yaml +++ b/.github/workflows/run-on-pr.yaml @@ -26,7 +26,6 @@ jobs: - "ubuntu-latest" - "ubuntu-20.04" python-version: - - "2.7" - "3.10" build-type: - "cext" @@ -34,9 +33,6 @@ jobs: architecture: - x64 exclude: - # ubuntu-latest does not have: py27, py36 - - os: "ubuntu-latest" - python-version: "2.7" # ubuntu-20.04 does not need to test what ubuntu-latest supports - os: "ubuntu-20.04" python-version: "3.10" diff --git a/.github/workflows/run-test.yaml b/.github/workflows/run-test.yaml index 2a08090ef97..a2d72826371 100644 --- a/.github/workflows/run-test.yaml +++ b/.github/workflows/run-test.yaml @@ -32,7 +32,6 @@ jobs: - "windows-latest" - "macos-latest" python-version: - - "2.7" - "3.6" - "3.7" - "3.8" @@ -60,10 +59,6 @@ jobs: pytest-args: "--dbdriver pysqlite --dbdriver aiosqlite" exclude: - # c-extensions fail to build on windows for python 2.7 - - os: "windows-latest" - python-version: "2.7" - build-type: "cext" # linux and osx do not have x86 python - os: "ubuntu-latest" architecture: x86 @@ -72,8 +67,6 @@ jobs: - os: "macos-latest" architecture: x86 # ubuntu-latest does not have: py27, py36 - - os: "ubuntu-latest" - python-version: "2.7" - os: "ubuntu-latest" python-version: "3.6" # ubuntu-20.04 does not need to test what ubuntu-latest supports diff --git a/doc/build/changelog/unreleased_20/py312.rst b/doc/build/changelog/unreleased_14/py312.rst similarity index 72% 
rename from doc/build/changelog/unreleased_20/py312.rst rename to doc/build/changelog/unreleased_14/py312.rst index 330cebb6434..9a7a73df46f 100644 --- a/doc/build/changelog/unreleased_20/py312.rst +++ b/doc/build/changelog/unreleased_14/py312.rst @@ -1,4 +1,4 @@ .. change:: - :tags: installation + :tags: platform, usecase Compatibility improvements to work fully with Python 3.12 diff --git a/lib/sqlalchemy/cextension/resultproxy.c b/lib/sqlalchemy/cextension/resultproxy.c index 20f1536529f..00eddc4475b 100644 --- a/lib/sqlalchemy/cextension/resultproxy.c +++ b/lib/sqlalchemy/cextension/resultproxy.c @@ -21,7 +21,7 @@ typedef Py_ssize_t (*lenfunc)(PyObject *); typedef intargfunc ssizeargfunc; #endif -#if PY_VERSION_HEX > 0x030c0000 +#if PY_VERSION_HEX >= 0x030c0000 # define PY_RAISE_SLICE_FOR_MAPPING PyExc_KeyError #else # define PY_RAISE_SLICE_FOR_MAPPING PyExc_TypeError From e9681237daa186b0d3d49e365c0859c5ac844d2b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 30 Jun 2023 10:14:55 -0400 Subject: [PATCH 542/632] remove use of SQL expressions in "modifiers" for regexp Fixed issue where the :meth:`_sql.ColumnOperators.regexp_match` when using "flags" would not produce a "stable" cache key, that is, the cache key would keep changing each time causing cache pollution. The same issue existed for :meth:`_sql.ColumnOperators.regexp_replace` with both the flags and the actual replacement expression. The flags are now represented as fixed modifier strings rendered as safestrings rather than bound parameters, and the replacement expression is established within the primary portion of the "binary" element so that it generates an appropriate cache key. Note that as part of this change, the :paramref:`_sql.ColumnOperators.regexp_match.flags` and :paramref:`_sql.ColumnOperators.regexp_replace.flags` have been modified to render as literal strings only, whereas previously they were rendered as full SQL expressions, typically bound parameters. 
These parameters should always be passed as plain Python strings and not as SQL expression constructs; it's not expected that SQL expression constructs were used in practice for this parameter, so this is a backwards-incompatible change. The change also modifies the internal structure of the expression generated, for :meth:`_sql.ColumnOperators.regexp_replace` with or without flags, and for :meth:`_sql.ColumnOperators.regexp_match` with flags. Third party dialects which may have implemented regexp implementations of their own (no such dialects could be located in a search, so impact is expected to be low) would need to adjust the traversal of the structure to accommodate. Fixed issue in mostly-internal :class:`.CacheKey` construct where the ``__ne__()`` operator were not properly implemented, leading to nonsensical results when comparing :class:`.CacheKey` instances to each other. Fixes: #10042 Change-Id: I2e245f81d7ee7136ad04cf77be35f9745c5da5e5 (cherry picked from commit 2d8ff4f9171bcef9fa70dfa27f2c0cab708fd75e) --- doc/build/changelog/unreleased_14/10042.rst | 43 ++++++++++++++++ lib/sqlalchemy/dialects/mysql/base.py | 17 +++---- lib/sqlalchemy/dialects/oracle/base.py | 17 +++---- lib/sqlalchemy/dialects/postgresql/base.py | 19 +++---- lib/sqlalchemy/sql/compiler.py | 4 +- lib/sqlalchemy/sql/default_comparator.py | 56 ++++++++++----------- lib/sqlalchemy/sql/operators.py | 25 +++++++-- lib/sqlalchemy/sql/traversals.py | 5 +- test/dialect/mysql/test_compiler.py | 47 ++++++++++++----- test/dialect/oracle/test_compiler.py | 37 ++++++-------- test/dialect/postgresql/test_compiler.py | 39 +++++--------- test/sql/test_compare.py | 29 +++++++++++ test/sql/test_operators.py | 6 +++ 13 files changed, 216 insertions(+), 128 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/10042.rst diff --git a/doc/build/changelog/unreleased_14/10042.rst b/doc/build/changelog/unreleased_14/10042.rst new file mode 100644 index 00000000000..22487014039 --- /dev/null +++ 
b/doc/build/changelog/unreleased_14/10042.rst @@ -0,0 +1,43 @@ +.. change:: + :tags: bug, sql + :tickets: 10042 + :versions: 2.0.18 + + Fixed issue where the :meth:`_sql.ColumnOperators.regexp_match` + when using "flags" would not produce a "stable" cache key, that + is, the cache key would keep changing each time causing cache pollution. + The same issue existed for :meth:`_sql.ColumnOperators.regexp_replace` + with both the flags and the actual replacement expression. + The flags are now represented as fixed modifier strings rendered as + safestrings rather than bound parameters, and the replacement + expression is established within the primary portion of the "binary" + element so that it generates an appropriate cache key. + + Note that as part of this change, the + :paramref:`_sql.ColumnOperators.regexp_match.flags` and + :paramref:`_sql.ColumnOperators.regexp_replace.flags` have been modified to + render as literal strings only, whereas previously they were rendered as + full SQL expressions, typically bound parameters. These parameters should + always be passed as plain Python strings and not as SQL expression + constructs; it's not expected that SQL expression constructs were used in + practice for this parameter, so this is a backwards-incompatible change. + + The change also modifies the internal structure of the expression + generated, for :meth:`_sql.ColumnOperators.regexp_replace` with or without + flags, and for :meth:`_sql.ColumnOperators.regexp_match` with flags. Third + party dialects which may have implemented regexp implementations of their + own (no such dialects could be located in a search, so impact is expected + to be low) would need to adjust the traversal of the structure to + accommodate. + + +.. 
change:: + :tags: bug, sql + :versions: 2.0.18 + + Fixed issue in mostly-internal :class:`.CacheKey` construct where the + ``__ne__()`` operator were not properly implemented, leading to nonsensical + results when comparing :class:`.CacheKey` instances to each other. + + + diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index 9948602d3db..73cb1ac09a6 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -1650,7 +1650,7 @@ def visit_is_not_distinct_from_binary(self, binary, operator, **kw): def _mariadb_regexp_flags(self, flags, pattern, **kw): return "CONCAT('(?', %s, ')', %s)" % ( - self.process(flags, **kw), + self.render_literal_value(flags, sqltypes.STRINGTYPE), self.process(pattern, **kw), ) @@ -1668,7 +1668,7 @@ def _regexp_match(self, op_string, binary, operator, **kw): text = "REGEXP_LIKE(%s, %s, %s)" % ( self.process(binary.left, **kw), self.process(binary.right, **kw), - self.process(flags, **kw), + self.render_literal_value(flags, sqltypes.STRINGTYPE), ) if op_string == " NOT REGEXP ": return "NOT %s" % text @@ -1683,25 +1683,22 @@ def visit_not_regexp_match_op_binary(self, binary, operator, **kw): def visit_regexp_replace_op_binary(self, binary, operator, **kw): flags = binary.modifiers["flags"] - replacement = binary.modifiers["replacement"] if flags is None: - return "REGEXP_REPLACE(%s, %s, %s)" % ( + return "REGEXP_REPLACE(%s, %s)" % ( self.process(binary.left, **kw), self.process(binary.right, **kw), - self.process(replacement, **kw), ) elif self.dialect.is_mariadb: return "REGEXP_REPLACE(%s, %s, %s)" % ( self.process(binary.left, **kw), - self._mariadb_regexp_flags(flags, binary.right), - self.process(replacement, **kw), + self._mariadb_regexp_flags(flags, binary.right.clauses[0]), + self.process(binary.right.clauses[1], **kw), ) else: - return "REGEXP_REPLACE(%s, %s, %s, %s)" % ( + return "REGEXP_REPLACE(%s, %s, %s)" % ( self.process(binary.left, **kw), 
self.process(binary.right, **kw), - self.process(replacement, **kw), - self.process(flags, **kw), + self.render_literal_value(flags, sqltypes.STRINGTYPE), ) diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py index 390ea5098c8..1b8540b8ef4 100644 --- a/lib/sqlalchemy/dialects/oracle/base.py +++ b/lib/sqlalchemy/dialects/oracle/base.py @@ -1281,7 +1281,7 @@ def visit_regexp_match_op_binary(self, binary, operator, **kw): return "REGEXP_LIKE(%s, %s, %s)" % ( string, pattern, - self.process(flags, **kw), + self.render_literal_value(flags, sqltypes.STRINGTYPE), ) def visit_not_regexp_match_op_binary(self, binary, operator, **kw): @@ -1291,21 +1291,18 @@ def visit_not_regexp_match_op_binary(self, binary, operator, **kw): def visit_regexp_replace_op_binary(self, binary, operator, **kw): string = self.process(binary.left, **kw) - pattern = self.process(binary.right, **kw) - replacement = self.process(binary.modifiers["replacement"], **kw) + pattern_replace = self.process(binary.right, **kw) flags = binary.modifiers["flags"] if flags is None: - return "REGEXP_REPLACE(%s, %s, %s)" % ( + return "REGEXP_REPLACE(%s, %s)" % ( string, - pattern, - replacement, + pattern_replace, ) else: - return "REGEXP_REPLACE(%s, %s, %s, %s)" % ( + return "REGEXP_REPLACE(%s, %s, %s)" % ( string, - pattern, - replacement, - self.process(flags, **kw), + pattern_replace, + self.render_literal_value(flags, sqltypes.STRINGTYPE), ) diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index 61e9645626b..a73569b1a7f 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -2391,14 +2391,14 @@ def _regexp_match(self, base_op, binary, operator, kw): return self._generate_generic_binary( binary, " %s " % base_op, **kw ) - if isinstance(flags, elements.BindParameter) and flags.value == "i": + if flags == "i": return self._generate_generic_binary( binary, " %s* " % 
base_op, **kw ) return "%s %s CONCAT('(?', %s, ')', %s)" % ( self.process(binary.left, **kw), base_op, - self.process(flags, **kw), + self.render_literal_value(flags, sqltypes.STRINGTYPE), self.process(binary.right, **kw), ) @@ -2410,21 +2410,18 @@ def visit_not_regexp_match_op_binary(self, binary, operator, **kw): def visit_regexp_replace_op_binary(self, binary, operator, **kw): string = self.process(binary.left, **kw) - pattern = self.process(binary.right, **kw) + pattern_replace = self.process(binary.right, **kw) flags = binary.modifiers["flags"] - replacement = self.process(binary.modifiers["replacement"], **kw) if flags is None: - return "REGEXP_REPLACE(%s, %s, %s)" % ( + return "REGEXP_REPLACE(%s, %s)" % ( string, - pattern, - replacement, + pattern_replace, ) else: - return "REGEXP_REPLACE(%s, %s, %s, %s)" % ( + return "REGEXP_REPLACE(%s, %s, %s)" % ( string, - pattern, - replacement, - self.process(flags, **kw), + pattern_replace, + self.render_literal_value(flags, sqltypes.STRINGTYPE), ) def visit_empty_set_expr(self, element_types): diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index a8d0674604c..0a460b8c091 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -4501,11 +4501,9 @@ def visit_not_regexp_match_op_binary(self, binary, operator, **kw): return self._generate_generic_binary(binary, " ", **kw) def visit_regexp_replace_op_binary(self, binary, operator, **kw): - replacement = binary.modifiers["replacement"] - return "(%s, %s, %s)" % ( + return "(%s, %s)" % ( binary.left._compiler_dispatch(self, **kw), binary.right._compiler_dispatch(self, **kw), - replacement._compiler_dispatch(self, **kw), ) diff --git a/lib/sqlalchemy/sql/default_comparator.py b/lib/sqlalchemy/sql/default_comparator.py index 73a1c0351b4..bb446748086 100644 --- a/lib/sqlalchemy/sql/default_comparator.py +++ b/lib/sqlalchemy/sql/default_comparator.py @@ -264,41 +264,41 @@ def _collate_impl(expr, op, other, **kw): def 
_regexp_match_impl(expr, op, pattern, flags, **kw): - if flags is not None: - flags = coercions.expect( + return BinaryExpression( + expr, + coercions.expect( roles.BinaryElementRole, - flags, + pattern, expr=expr, - operator=operators.regexp_replace_op, - ) - return _boolean_compare( - expr, + operator=operators.comma_op, + ), op, - pattern, - flags=flags, - negate=operators.not_regexp_match_op - if op is operators.regexp_match_op - else operators.regexp_match_op, - **kw + negate=operators.not_regexp_match_op, + modifiers={"flags": flags}, ) def _regexp_replace_impl(expr, op, pattern, replacement, flags, **kw): - replacement = coercions.expect( - roles.BinaryElementRole, - replacement, - expr=expr, - operator=operators.regexp_replace_op, - ) - if flags is not None: - flags = coercions.expect( - roles.BinaryElementRole, - flags, - expr=expr, - operator=operators.regexp_replace_op, - ) - return _binary_operate( - expr, op, pattern, replacement=replacement, flags=flags, **kw + return BinaryExpression( + expr, + ClauseList( + coercions.expect( + roles.BinaryElementRole, + pattern, + expr=expr, + operator=operators.comma_op, + ), + coercions.expect( + roles.BinaryElementRole, + replacement, + expr=expr, + operator=operators.comma_op, + ), + operator=operators.comma_op, + group=False, + ), + op, + modifiers={"flags": flags}, ) diff --git a/lib/sqlalchemy/sql/operators.py b/lib/sqlalchemy/sql/operators.py index 2ce1add26f8..b6e9e27b8cc 100644 --- a/lib/sqlalchemy/sql/operators.py +++ b/lib/sqlalchemy/sql/operators.py @@ -1037,8 +1037,8 @@ def regexp_match(self, pattern, flags=None): :param pattern: The regular expression pattern string or column clause. - :param flags: Any regular expression string flags to apply. Flags - tend to be backend specific. It can be a string or a column clause. + :param flags: Any regular expression string flags to apply, passed as + plain Python string only. These flags are backend specific. 
Some backends, like PostgreSQL and MariaDB, may alternatively specify the flags as part of the pattern. When using the ignore case flag 'i' in PostgreSQL, the ignore case @@ -1046,6 +1046,14 @@ def regexp_match(self, pattern, flags=None): .. versionadded:: 1.4 + .. versionchanged:: 1.4.48, 2.0.18 Note that due to an implementation + error, the "flags" parameter previously accepted SQL expression + objects such as column expressions in addition to plain Python + strings. This implementation did not work correctly with caching + and was removed; strings only should be passed for the "flags" + parameter, as these flags are rendered as literal inline values + within SQL expressions. + .. seealso:: :meth:`_sql.ColumnOperators.regexp_replace` @@ -1080,13 +1088,22 @@ def regexp_replace(self, pattern, replacement, flags=None): :param pattern: The regular expression pattern string or column clause. :param pattern: The replacement string or column clause. - :param flags: Any regular expression string flags to apply. Flags - tend to be backend specific. It can be a string or a column clause. + :param flags: Any regular expression string flags to apply, passed as + plain Python string only. These flags are backend specific. Some backends, like PostgreSQL and MariaDB, may alternatively specify the flags as part of the pattern. .. versionadded:: 1.4 + .. versionchanged:: 1.4.48, 2.0.18 Note that due to an implementation + error, the "flags" parameter previously accepted SQL expression + objects such as column expressions in addition to plain Python + strings. This implementation did not work correctly with caching + and was removed; strings only should be passed for the "flags" + parameter, as these flags are rendered as literal inline values + within SQL expressions. + + .. 
seealso:: :meth:`_sql.ColumnOperators.regexp_match` diff --git a/lib/sqlalchemy/sql/traversals.py b/lib/sqlalchemy/sql/traversals.py index de97b9de94c..fd20bbc4cf5 100644 --- a/lib/sqlalchemy/sql/traversals.py +++ b/lib/sqlalchemy/sql/traversals.py @@ -383,7 +383,10 @@ def to_offline_string(self, statement_cache, statement, parameters): return repr((sql_str, param_tuple)) def __eq__(self, other): - return self.key == other.key + return bool(self.key == other.key) + + def __ne__(self, other): + return not (self.key == other.key) @classmethod def _diff_tuples(cls, left, right): diff --git a/test/dialect/mysql/test_compiler.py b/test/dialect/mysql/test_compiler.py index ba162b49020..bb16099cd82 100644 --- a/test/dialect/mysql/test_compiler.py +++ b/test/dialect/mysql/test_compiler.py @@ -1210,18 +1210,25 @@ def test_regexp_replace_string(self): class RegexpTestMySql(fixtures.TestBase, RegexpCommon): __dialect__ = "mysql" + def test_regexp_match_flags_safestring(self): + self.assert_compile( + self.table.c.myid.regexp_match("pattern", flags="i'g"), + "REGEXP_LIKE(mytable.myid, %s, 'i''g')", + checkpositional=("pattern",), + ) + def test_regexp_match_flags(self): self.assert_compile( self.table.c.myid.regexp_match("pattern", flags="ig"), - "REGEXP_LIKE(mytable.myid, %s, %s)", - checkpositional=("pattern", "ig"), + "REGEXP_LIKE(mytable.myid, %s, 'ig')", + checkpositional=("pattern",), ) def test_not_regexp_match_flags(self): self.assert_compile( ~self.table.c.myid.regexp_match("pattern", flags="ig"), - "NOT REGEXP_LIKE(mytable.myid, %s, %s)", - checkpositional=("pattern", "ig"), + "NOT REGEXP_LIKE(mytable.myid, %s, 'ig')", + checkpositional=("pattern",), ) def test_regexp_replace_flags(self): @@ -1229,26 +1236,42 @@ def test_regexp_replace_flags(self): self.table.c.myid.regexp_replace( "pattern", "replacement", flags="ig" ), - "REGEXP_REPLACE(mytable.myid, %s, %s, %s)", - checkpositional=("pattern", "replacement", "ig"), + "REGEXP_REPLACE(mytable.myid, %s, %s, 'ig')", + 
checkpositional=("pattern", "replacement"), + ) + + def test_regexp_replace_flags_safestring(self): + self.assert_compile( + self.table.c.myid.regexp_replace( + "pattern", "replacement", flags="i'g" + ), + "REGEXP_REPLACE(mytable.myid, %s, %s, 'i''g')", + checkpositional=("pattern", "replacement"), ) class RegexpTestMariaDb(fixtures.TestBase, RegexpCommon): __dialect__ = "mariadb" + def test_regexp_match_flags_safestring(self): + self.assert_compile( + self.table.c.myid.regexp_match("pattern", flags="i'g"), + "mytable.myid REGEXP CONCAT('(?', 'i''g', ')', %s)", + checkpositional=("pattern",), + ) + def test_regexp_match_flags(self): self.assert_compile( self.table.c.myid.regexp_match("pattern", flags="ig"), - "mytable.myid REGEXP CONCAT('(?', %s, ')', %s)", - checkpositional=("ig", "pattern"), + "mytable.myid REGEXP CONCAT('(?', 'ig', ')', %s)", + checkpositional=("pattern",), ) def test_not_regexp_match_flags(self): self.assert_compile( ~self.table.c.myid.regexp_match("pattern", flags="ig"), - "mytable.myid NOT REGEXP CONCAT('(?', %s, ')', %s)", - checkpositional=("ig", "pattern"), + "mytable.myid NOT REGEXP CONCAT('(?', 'ig', ')', %s)", + checkpositional=("pattern",), ) def test_regexp_replace_flags(self): @@ -1256,8 +1279,8 @@ def test_regexp_replace_flags(self): self.table.c.myid.regexp_replace( "pattern", "replacement", flags="ig" ), - "REGEXP_REPLACE(mytable.myid, CONCAT('(?', %s, ')', %s), %s)", - checkpositional=("ig", "pattern", "replacement"), + "REGEXP_REPLACE(mytable.myid, CONCAT('(?', 'ig', ')', %s), %s)", + checkpositional=("pattern", "replacement"), ) diff --git a/test/dialect/oracle/test_compiler.py b/test/dialect/oracle/test_compiler.py index 08b68f0f030..6c3e0fb706b 100644 --- a/test/dialect/oracle/test_compiler.py +++ b/test/dialect/oracle/test_compiler.py @@ -1473,14 +1473,14 @@ def test_regexp_match_str(self): def test_regexp_match_flags(self): self.assert_compile( self.table.c.myid.regexp_match("pattern", flags="ig"), - 
"REGEXP_LIKE(mytable.myid, :myid_1, :myid_2)", - checkparams={"myid_1": "pattern", "myid_2": "ig"}, + "REGEXP_LIKE(mytable.myid, :myid_1, 'ig')", + checkparams={"myid_1": "pattern"}, ) - def test_regexp_match_flags_col(self): + def test_regexp_match_flags_safestring(self): self.assert_compile( - self.table.c.myid.regexp_match("pattern", flags=self.table.c.name), - "REGEXP_LIKE(mytable.myid, :myid_1, mytable.name)", + self.table.c.myid.regexp_match("pattern", flags="i'g"), + "REGEXP_LIKE(mytable.myid, :myid_1, 'i''g')", checkparams={"myid_1": "pattern"}, ) @@ -1505,20 +1505,11 @@ def test_not_regexp_match_str(self): checkparams={"param_1": "string"}, ) - def test_not_regexp_match_flags_col(self): - self.assert_compile( - ~self.table.c.myid.regexp_match( - "pattern", flags=self.table.c.name - ), - "NOT REGEXP_LIKE(mytable.myid, :myid_1, mytable.name)", - checkparams={"myid_1": "pattern"}, - ) - def test_not_regexp_match_flags(self): self.assert_compile( ~self.table.c.myid.regexp_match("pattern", flags="ig"), - "NOT REGEXP_LIKE(mytable.myid, :myid_1, :myid_2)", - checkparams={"myid_1": "pattern", "myid_2": "ig"}, + "NOT REGEXP_LIKE(mytable.myid, :myid_1, 'ig')", + checkparams={"myid_1": "pattern"}, ) def test_regexp_replace(self): @@ -1554,21 +1545,23 @@ def test_regexp_replace_flags(self): self.table.c.myid.regexp_replace( "pattern", "replacement", flags="ig" ), - "REGEXP_REPLACE(mytable.myid, :myid_1, :myid_2, :myid_3)", + "REGEXP_REPLACE(mytable.myid, :myid_1, :myid_2, 'ig')", checkparams={ "myid_1": "pattern", "myid_2": "replacement", - "myid_3": "ig", }, ) - def test_regexp_replace_flags_col(self): + def test_regexp_replace_flags_safestring(self): self.assert_compile( self.table.c.myid.regexp_replace( - "pattern", "replacement", flags=self.table.c.name + "pattern", "replacement", flags="i'g" ), - "REGEXP_REPLACE(mytable.myid, :myid_1, :myid_2, mytable.name)", - checkparams={"myid_1": "pattern", "myid_2": "replacement"}, + "REGEXP_REPLACE(mytable.myid, :myid_1, 
:myid_2, 'i''g')", + checkparams={ + "myid_1": "pattern", + "myid_2": "replacement", + }, ) diff --git a/test/dialect/postgresql/test_compiler.py b/test/dialect/postgresql/test_compiler.py index e9de407c8e7..a005821cc6e 100644 --- a/test/dialect/postgresql/test_compiler.py +++ b/test/dialect/postgresql/test_compiler.py @@ -3182,8 +3182,8 @@ def test_regexp_match_str(self): def test_regexp_match_flags(self): self.assert_compile( self.table.c.myid.regexp_match("pattern", flags="ig"), - "mytable.myid ~ CONCAT('(?', %(myid_1)s, ')', %(myid_2)s)", - checkparams={"myid_2": "pattern", "myid_1": "ig"}, + "mytable.myid ~ CONCAT('(?', 'ig', ')', %(myid_1)s)", + checkparams={"myid_1": "pattern"}, ) def test_regexp_match_flags_ignorecase(self): @@ -3193,13 +3193,6 @@ def test_regexp_match_flags_ignorecase(self): checkparams={"myid_1": "pattern"}, ) - def test_regexp_match_flags_col(self): - self.assert_compile( - self.table.c.myid.regexp_match("pattern", flags=self.table.c.name), - "mytable.myid ~ CONCAT('(?', mytable.name, ')', %(myid_1)s)", - checkparams={"myid_1": "pattern"}, - ) - def test_not_regexp_match(self): self.assert_compile( ~self.table.c.myid.regexp_match("pattern"), @@ -3224,8 +3217,8 @@ def test_not_regexp_match_str(self): def test_not_regexp_match_flags(self): self.assert_compile( ~self.table.c.myid.regexp_match("pattern", flags="ig"), - "mytable.myid !~ CONCAT('(?', %(myid_1)s, ')', %(myid_2)s)", - checkparams={"myid_2": "pattern", "myid_1": "ig"}, + "mytable.myid !~ CONCAT('(?', 'ig', ')', %(myid_1)s)", + checkparams={"myid_1": "pattern"}, ) def test_not_regexp_match_flags_ignorecase(self): @@ -3235,15 +3228,6 @@ def test_not_regexp_match_flags_ignorecase(self): checkparams={"myid_1": "pattern"}, ) - def test_not_regexp_match_flags_col(self): - self.assert_compile( - ~self.table.c.myid.regexp_match( - "pattern", flags=self.table.c.name - ), - "mytable.myid !~ CONCAT('(?', mytable.name, ')', %(myid_1)s)", - checkparams={"myid_1": "pattern"}, - ) - def 
test_regexp_replace(self): self.assert_compile( self.table.c.myid.regexp_replace("pattern", "replacement"), @@ -3277,22 +3261,23 @@ def test_regexp_replace_flags(self): self.table.c.myid.regexp_replace( "pattern", "replacement", flags="ig" ), - "REGEXP_REPLACE(mytable.myid, %(myid_1)s, %(myid_2)s, %(myid_3)s)", + "REGEXP_REPLACE(mytable.myid, %(myid_1)s, %(myid_2)s, 'ig')", checkparams={ "myid_1": "pattern", "myid_2": "replacement", - "myid_3": "ig", }, ) - def test_regexp_replace_flags_col(self): + def test_regexp_replace_flags_safestring(self): self.assert_compile( self.table.c.myid.regexp_replace( - "pattern", "replacement", flags=self.table.c.name + "pattern", "replacement", flags="i'g" ), - "REGEXP_REPLACE(mytable.myid, %(myid_1)s," - " %(myid_2)s, mytable.name)", - checkparams={"myid_1": "pattern", "myid_2": "replacement"}, + "REGEXP_REPLACE(mytable.myid, %(myid_1)s, %(myid_2)s, 'i''g')", + checkparams={ + "myid_1": "pattern", + "myid_2": "replacement", + }, ) @testing.combinations( diff --git a/test/sql/test_compare.py b/test/sql/test_compare.py index c8e1efbf1b7..d64deb86777 100644 --- a/test/sql/test_compare.py +++ b/test/sql/test_compare.py @@ -237,6 +237,14 @@ class CoreFixtures(object): column("q").like("somstr", escape="\\"), column("q").like("somstr", escape="X"), ), + lambda: ( + column("q").regexp_match("y", flags="ig"), + column("q").regexp_match("y", flags="q"), + column("q").regexp_match("y"), + column("q").regexp_replace("y", "z", flags="ig"), + column("q").regexp_replace("y", "z", flags="q"), + column("q").regexp_replace("y", "z"), + ), lambda: ( column("q", ARRAY(Integer))[3] == 5, column("q", ARRAY(Integer))[3:5] == 5, @@ -1108,6 +1116,27 @@ def test_values_doesnt_caches_right_now(self): is_(large_v1._generate_cache_key(), None) + @testing.combinations( + (lambda: column("x"), lambda: column("x"), lambda: column("y")), + ( + lambda: func.foo_bar(1, 2, 3), + lambda: func.foo_bar(4, 5, 6), + lambda: func.foo_bar_bat(1, 2, 3), + ), + ) + def 
test_cache_key_object_comparators(self, lc1, lc2, lc3): + """test ne issue detected as part of #10042""" + c1 = lc1() + c2 = lc2() + c3 = lc3() + + eq_(c1._generate_cache_key(), c2._generate_cache_key()) + ne_(c1._generate_cache_key(), c3._generate_cache_key()) + is_true(c1._generate_cache_key() == c2._generate_cache_key()) + is_false(c1._generate_cache_key() != c2._generate_cache_key()) + is_true(c1._generate_cache_key() != c3._generate_cache_key()) + is_false(c1._generate_cache_key() == c3._generate_cache_key()) + def test_cache_key(self): for fixtures_, compare_values in [ (self.fixtures, True), diff --git a/test/sql/test_operators.py b/test/sql/test_operators.py index 62f33c2ec24..a03cb21fb30 100644 --- a/test/sql/test_operators.py +++ b/test/sql/test_operators.py @@ -2486,6 +2486,12 @@ def test_like_3(self): "mytable.myid LIKE :myid_1 ESCAPE '\\'", ) + def test_like_quote_escape(self): + self.assert_compile( + self.table1.c.myid.like("somstr", escape="'"), + "mytable.myid LIKE :myid_1 ESCAPE ''''", + ) + def test_like_4(self): self.assert_compile( ~self.table1.c.myid.like("somstr", escape="\\"), From 65595ba91dd4b253ca7914ff5671ea6e0503e321 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 5 Jul 2023 10:04:08 -0400 Subject: [PATCH 543/632] update mypy warnings / deprecation Change-Id: Ib89f80568427833561f644894791f7d68caada0d --- doc/build/orm/extensions/mypy.rst | 57 ++++++++++++++++++------------- 1 file changed, 33 insertions(+), 24 deletions(-) diff --git a/doc/build/orm/extensions/mypy.rst b/doc/build/orm/extensions/mypy.rst index b0d89306502..6c94ae5f712 100644 --- a/doc/build/orm/extensions/mypy.rst +++ b/doc/build/orm/extensions/mypy.rst @@ -6,18 +6,34 @@ Mypy / Pep-484 Support for ORM Mappings Support for :pep:`484` typing annotations as well as the MyPy_ type checking tool. +.. deprecated:: 2.0 + + **The SQLAlchemy Mypy Plugin is DEPRECATED, and will be removed possibly + as early as the SQLAlchemy 2.1 release. 
We would urge users to please + migrate away from it ASAP.** + + This plugin cannot be maintained across constantly changing releases + of mypy and its stability going forward CANNOT be guaranteed. + + Modern SQLAlchemy now offers fully pep-484 compliant mapping syntaxes; see + the migration guide in the SQLAlchemy 2.0 documentation for + details. + .. topic:: SQLAlchemy Mypy Plugin Status Update - **Updated February 17, 2022** + **Updated July 2023** + + For SQLAlchemy 2.0, the Mypy plugin continues to work at the level at which + it reached in the SQLAlchemy 1.4 release. SQLAlchemy 2.0 however features + an all new typing system for ORM Declarative models that removes the need + for the Mypy plugin and delivers much more consistent behavior with + generally superior capabilities. Note that this new capability is **not part + of SQLAlchemy 1.4, it is only in SQLAlchemy 2.0**. The SQLAlchemy Mypy plugin, while it has technically never left the "alpha" - stage, should **now be considered as legacy, even though it is still - necessary for full Mypy support when using SQLAlchemy 1.4**. SQLAlchemy - version 2.0, when released, will include new constructs that will allow for - construction of declarative mappings in place which will support proper - typing directly, without the need for plugins. This new feature is **not - part of SQLAlchemy 1.4, it is only in SQLAlchemy 2.0, which is not released - yet as of Feb 17, 2022**. + stage, should **now be considered as deprecated in SQLAlchemy 2.0, even + though it is still necessary for full Mypy support when using + SQLAlchemy 1.4**. The Mypy plugin itself does not solve the issue of supplying correct typing with other typing tools such as Pylance/Pyright, Pytype, Pycharm, etc, which @@ -29,24 +45,17 @@ MyPy_ type checking tool. patterns which are reported regularly. For these reasons, new non-regression issues reported against the Mypy - plugin are unlikely to be fixed. 
When SQLAlchemy 2.0 is released, it will - continue to include the plugin, which will have been updated to continue to - function as well as it does in SQLAlchemy 1.4, when running under SQLAlchemy - 2.0. **Existing code that passes Mypy checks using the plugin with - SQLAlchemy 1.4 installed will continue to pass all checks in SQLAlchemy 2.0 - without any changes required, provided the plugin is still used. The - upcoming API to be released with SQLAlchemy 2.0 is fully backwards - compatible with the SQLAlchemy 1.4 API and Mypy plugin behavior.** + plugin are unlikely to be fixed. **Existing code that passes Mypy checks + using the plugin with SQLAlchemy 1.4 installed will continue to pass all + checks in SQLAlchemy 2.0 without any changes required, provided the plugin + is still used. SQLAlchemy 2.0's API is fully + backwards compatible with the SQLAlchemy 1.4 API and Mypy plugin behavior.** End-user code that passes all checks under SQLAlchemy 1.4 with the Mypy - plugin will be able to incrementally migrate to the new structures, once - that code is running exclusively on SQLAlchemy 2.0. The change consists of - altering how the :func:`_orm.declarative_base` construct is produced, and - then the replacement of inline Declarative :class:`_schema.Column` - structures with a fully cross-compatible ``mapped_column()`` construct. Both - constructs can coexist on any declaratively mapped class. - - Code that is running exclusively on **not-released-yet** SQLAlchemy version + plugin may incrementally migrate to the new structures, once + that code is running exclusively on SQLAlchemy 2.0. + + Code that is running exclusively on SQLAlchemy version 2.0 and has fully migrated to the new declarative constructs will enjoy full compliance with pep-484 as well as working correctly within IDEs and other typing tools, without the need for plugins. 
From f13ce135bfcb5072c11aa0b3ad2ccf0594ffa637 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 5 Jul 2023 13:37:25 -0400 Subject: [PATCH 544/632] changelog updates Change-Id: If3baf79906d062e1e046d7c31b38eeeefdc984bf (cherry picked from commit 0346da63f009740df2f3c1c94b80f995168d5d9e) --- doc/build/changelog/unreleased_14/mypy14.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/changelog/unreleased_14/mypy14.rst b/doc/build/changelog/unreleased_14/mypy14.rst index 97c37514f8a..ec073c74313 100644 --- a/doc/build/changelog/unreleased_14/mypy14.rst +++ b/doc/build/changelog/unreleased_14/mypy14.rst @@ -1,5 +1,5 @@ .. change:: - :tags: bug, ext + :tags: bug, extensions :versions: 2.0.17 Fixed issue in mypy plugin for use with mypy 1.4. From a7438bba206f5ffa2a14cd9398354ba78fed7b6e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 5 Jul 2023 13:45:01 -0400 Subject: [PATCH 545/632] - 1.4.49 --- doc/build/changelog/changelog_14.rst | 57 +++++++++++++++++++- doc/build/changelog/unreleased_14/10042.rst | 43 --------------- doc/build/changelog/unreleased_14/mypy14.rst | 5 -- doc/build/changelog/unreleased_14/py312.rst | 4 -- doc/build/conf.py | 4 +- 5 files changed, 58 insertions(+), 55 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/10042.rst delete mode 100644 doc/build/changelog/unreleased_14/mypy14.rst delete mode 100644 doc/build/changelog/unreleased_14/py312.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index ab9552ec7f5..c7b1804001b 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,62 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.49 - :include_notes_from: unreleased_14 + :released: July 5, 2023 + + .. 
change:: + :tags: bug, sql + :tickets: 10042 + :versions: 2.0.18 + + Fixed issue where the :meth:`_sql.ColumnOperators.regexp_match` + when using "flags" would not produce a "stable" cache key, that + is, the cache key would keep changing each time causing cache pollution. + The same issue existed for :meth:`_sql.ColumnOperators.regexp_replace` + with both the flags and the actual replacement expression. + The flags are now represented as fixed modifier strings rendered as + safestrings rather than bound parameters, and the replacement + expression is established within the primary portion of the "binary" + element so that it generates an appropriate cache key. + + Note that as part of this change, the + :paramref:`_sql.ColumnOperators.regexp_match.flags` and + :paramref:`_sql.ColumnOperators.regexp_replace.flags` have been modified to + render as literal strings only, whereas previously they were rendered as + full SQL expressions, typically bound parameters. These parameters should + always be passed as plain Python strings and not as SQL expression + constructs; it's not expected that SQL expression constructs were used in + practice for this parameter, so this is a backwards-incompatible change. + + The change also modifies the internal structure of the expression + generated, for :meth:`_sql.ColumnOperators.regexp_replace` with or without + flags, and for :meth:`_sql.ColumnOperators.regexp_match` with flags. Third + party dialects which may have implemented regexp implementations of their + own (no such dialects could be located in a search, so impact is expected + to be low) would need to adjust the traversal of the structure to + accommodate. + + + .. change:: + :tags: bug, sql + :versions: 2.0.18 + + Fixed issue in mostly-internal :class:`.CacheKey` construct where the + ``__ne__()`` operator were not properly implemented, leading to nonsensical + results when comparing :class:`.CacheKey` instances to each other. + + + + + .. 
change:: + :tags: bug, extensions + :versions: 2.0.17 + + Fixed issue in mypy plugin for use with mypy 1.4. + + .. change:: + :tags: platform, usecase + + Compatibility improvements to work fully with Python 3.12 .. changelog:: :version: 1.4.48 diff --git a/doc/build/changelog/unreleased_14/10042.rst b/doc/build/changelog/unreleased_14/10042.rst deleted file mode 100644 index 22487014039..00000000000 --- a/doc/build/changelog/unreleased_14/10042.rst +++ /dev/null @@ -1,43 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 10042 - :versions: 2.0.18 - - Fixed issue where the :meth:`_sql.ColumnOperators.regexp_match` - when using "flags" would not produce a "stable" cache key, that - is, the cache key would keep changing each time causing cache pollution. - The same issue existed for :meth:`_sql.ColumnOperators.regexp_replace` - with both the flags and the actual replacement expression. - The flags are now represented as fixed modifier strings rendered as - safestrings rather than bound parameters, and the replacement - expression is established within the primary portion of the "binary" - element so that it generates an appropriate cache key. - - Note that as part of this change, the - :paramref:`_sql.ColumnOperators.regexp_match.flags` and - :paramref:`_sql.ColumnOperators.regexp_replace.flags` have been modified to - render as literal strings only, whereas previously they were rendered as - full SQL expressions, typically bound parameters. These parameters should - always be passed as plain Python strings and not as SQL expression - constructs; it's not expected that SQL expression constructs were used in - practice for this parameter, so this is a backwards-incompatible change. - - The change also modifies the internal structure of the expression - generated, for :meth:`_sql.ColumnOperators.regexp_replace` with or without - flags, and for :meth:`_sql.ColumnOperators.regexp_match` with flags. 
Third - party dialects which may have implemented regexp implementations of their - own (no such dialects could be located in a search, so impact is expected - to be low) would need to adjust the traversal of the structure to - accommodate. - - -.. change:: - :tags: bug, sql - :versions: 2.0.18 - - Fixed issue in mostly-internal :class:`.CacheKey` construct where the - ``__ne__()`` operator were not properly implemented, leading to nonsensical - results when comparing :class:`.CacheKey` instances to each other. - - - diff --git a/doc/build/changelog/unreleased_14/mypy14.rst b/doc/build/changelog/unreleased_14/mypy14.rst deleted file mode 100644 index ec073c74313..00000000000 --- a/doc/build/changelog/unreleased_14/mypy14.rst +++ /dev/null @@ -1,5 +0,0 @@ -.. change:: - :tags: bug, extensions - :versions: 2.0.17 - - Fixed issue in mypy plugin for use with mypy 1.4. diff --git a/doc/build/changelog/unreleased_14/py312.rst b/doc/build/changelog/unreleased_14/py312.rst deleted file mode 100644 index 9a7a73df46f..00000000000 --- a/doc/build/changelog/unreleased_14/py312.rst +++ /dev/null @@ -1,4 +0,0 @@ -.. change:: - :tags: platform, usecase - - Compatibility improvements to work fully with Python 3.12 diff --git a/doc/build/conf.py b/doc/build/conf.py index 872a7d4086d..52d2753a820 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -219,9 +219,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. 
-release = "1.4.48" +release = "1.4.49" -release_date = "April 30, 2023" +release_date = "July 5, 2023" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From 5255a6356dc2f12f463c0ad9a3ccaeb54730c075 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 5 Jul 2023 13:56:40 -0400 Subject: [PATCH 546/632] Version 1.4.50 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index c7b1804001b..d54c9f8e068 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.50 + :include_notes_from: unreleased_14 + .. changelog:: :version: 1.4.49 :released: July 5, 2023 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index e6c51677ff1..4c49a58148d 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.49" +__version__ = "1.4.50" def __go(lcls): From 9e1b54b64cb6f8e19877cda94dec6e67dbc610f6 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Thu, 6 Jul 2023 22:49:05 +0200 Subject: [PATCH 547/632] repair pipelines for 2.7 Change-Id: Id4766704e4f7e4da2495ebdd8f44e4618a695df5 --- .github/workflows/create-wheels.yaml | 19 ++++++++++++++- .github/workflows/run-test.yaml | 36 ++++++++++++++++++++++++++-- 2 files changed, 52 insertions(+), 3 deletions(-) diff --git a/.github/workflows/create-wheels.yaml b/.github/workflows/create-wheels.yaml index d88da9038c2..a00465d9fce 100644 --- a/.github/workflows/create-wheels.yaml +++ b/.github/workflows/create-wheels.yaml @@ -23,7 +23,6 @@ jobs: - "windows-latest" - "macos-latest" python-version: - - "2.7" - "3.6" - "3.7" - "3.8" @@ 
-249,12 +248,14 @@ jobs: pip-wheel-args: "-w ./dist --no-use-pep517 -v --no-deps" - name: Set up Python + if: ${{ matrix.python-version != 'cp27-cp27m' && matrix.python-version != 'cp27-cp27mu' }} uses: actions/setup-python@v4 with: python-version: ${{ steps.linux-py-version.outputs.python-version }} architecture: ${{ matrix.architecture }} - name: Check created wheel + if: ${{ matrix.python-version != 'cp27-cp27m' && matrix.python-version != 'cp27-cp27mu' }} # check that the wheel is compatible with the current installation. # If it is then does: # - install the created wheel without using the pypi index @@ -273,6 +274,22 @@ jobs: echo Not compatible. Skipping install. fi + - name: Check created wheel 27 + if: ${{ matrix.python-version == 'cp27-cp27m' || matrix.python-version == 'cp27-cp27mu' }} + # check that the wheel is compatible with the current installation. + # - runs the tests + uses: docker://quay.io/pypa/manylinux1_x86_64 + with: + args: | + bash -c " + export PATH=/opt/python/${{ matrix.python-version }}/bin:$PATH && + python --version && + pip install \"greenlet<2\" \"importlib-metadata;python_version<'3.8'\" && + pip install -f dist --no-index sqlalchemy && + python -c 'from sqlalchemy.util import has_compiled_ext; assert has_compiled_ext()' && + pip install pytest pytest-xdist ${{ matrix.extra-requires }} && + pytest -n2 -q test --nomemory --notimingintensive" + - name: Upload wheels to release # upload the generated wheels to the github release uses: sqlalchemyorg/upload-release-assets@sa diff --git a/.github/workflows/run-test.yaml b/.github/workflows/run-test.yaml index a2d72826371..c85f5043d32 100644 --- a/.github/workflows/run-test.yaml +++ b/.github/workflows/run-test.yaml @@ -28,7 +28,6 @@ jobs: matrix: os: - "ubuntu-latest" - - "ubuntu-20.04" - "windows-latest" - "macos-latest" python-version: @@ -109,6 +108,40 @@ jobs: - name: Run tests run: tox -e github-${{ matrix.build-type }} -- -q --nomemory --notimingintensive ${{ matrix.pytest-args }} 
+ run-test-py27: + name: py27-${{ matrix.build-type }}-${{ matrix.os }} + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: + - "ubuntu-latest" + python-version: + - cp27-cp27m + - cp27-cp27mu + build-type: + - "cext" + - "nocext" + + fail-fast: false + + steps: + - name: Checkout repo + uses: actions/checkout@v3 + + - name: Run tests + uses: docker://quay.io/pypa/manylinux1_x86_64 + with: + args: | + bash -c " + export PATH=/opt/python/${{ matrix.python-version }}/bin:$PATH && + sed -i 's/greenlet/greenlet<2,/g' setup.cfg && + python --version && + python -m pip install --upgrade pip && + pip install --upgrade tox setuptools && + pip list && + tox -e github-${{ matrix.build-type }} -- -q --nomemory --notimingintensive ${{ matrix.pytest-args }} + " + run-test-arm64: name: arm64-${{ matrix.python-version }}-${{ matrix.build-type }}-${{ matrix.os }} runs-on: ${{ matrix.os }} @@ -116,7 +149,6 @@ jobs: matrix: os: - "ubuntu-latest" - - "ubuntu-20.04" python-version: - cp36-cp36m - cp37-cp37m From 116a6ea1190b62eba18904f293d24d4207ea3303 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Mon, 10 Jul 2023 22:25:13 +0200 Subject: [PATCH 548/632] fix typo in test pipeline Change-Id: I596c38cfcf56552deb913b494dd10ee4b1d68462 --- .github/workflows/run-test.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/run-test.yaml b/.github/workflows/run-test.yaml index c85f5043d32..bea6fd90cfc 100644 --- a/.github/workflows/run-test.yaml +++ b/.github/workflows/run-test.yaml @@ -54,8 +54,6 @@ jobs: # add aiosqlite on linux - os: "ubuntu-latest" pytest-args: "--dbdriver pysqlite --dbdriver aiosqlite" - - os: "ubuntu-20.04" - pytest-args: "--dbdriver pysqlite --dbdriver aiosqlite" exclude: # linux and osx do not have x86 python From acdebde0d501e8029c667e41be23ab0ea5e862aa Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 9 Aug 2023 17:26:54 -0400 Subject: [PATCH 549/632] update for latest flake8 Change-Id: Ic0282fd1de889d1dcf22a1aad6e09fe3aa074dc2 --- 
lib/sqlalchemy/dialects/sqlite/pysqlite.py | 2 +- lib/sqlalchemy/orm/instrumentation.py | 2 +- lib/sqlalchemy/orm/query.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/sqlalchemy/dialects/sqlite/pysqlite.py b/lib/sqlalchemy/dialects/sqlite/pysqlite.py index b10c17de2a6..0c750f1e165 100644 --- a/lib/sqlalchemy/dialects/sqlite/pysqlite.py +++ b/lib/sqlalchemy/dialects/sqlite/pysqlite.py @@ -573,7 +573,7 @@ def iso_level(conn): fns.append(iso_level) - def connect(conn): + def connect(conn): # noqa: F811 for fn in fns: fn(conn) diff --git a/lib/sqlalchemy/orm/instrumentation.py b/lib/sqlalchemy/orm/instrumentation.py index a8a06f254ea..ce9809a1142 100644 --- a/lib/sqlalchemy/orm/instrumentation.py +++ b/lib/sqlalchemy/orm/instrumentation.py @@ -199,7 +199,7 @@ def _loader_impls(self): return frozenset([attr.impl for attr in self.values()]) @util.memoized_property - def mapper(self): + def mapper(self): # noqa: F811 # raises unless self.mapper has been assigned raise exc.UnmappedClassError(self.class_) diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index 8dd988ef38c..c7080f85b59 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -1720,7 +1720,7 @@ def filter(self, *criterion): self._where_criteria += (criterion,) @util.memoized_property - def _last_joined_entity(self): + def _last_joined_entity(self): # noqa: F811 if self._legacy_setup_joins: return _legacy_determine_last_joined_entity( self._legacy_setup_joins, self._entity_from_pre_ent_zero() From 313cc94cb578543a3c3cf3e104b0c144ada624e7 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 9 Aug 2023 10:17:35 -0400 Subject: [PATCH 550/632] implement custom setstate to work around implicit type/comparator Fixed issue where unpickling of a :class:`_schema.Column` or other :class:`_sql.ColumnElement` would fail to restore the correct "comparator" object, which is used to generate SQL expressions specific to the type object. 
Fixes: #10213 Change-Id: I74e805024bcc0d93d549bd94757c2865b3117d72 (cherry picked from commit 9d2b83740ad5c700b28cf4ca7807c09c7338c36a) --- doc/build/changelog/unreleased_14/10213.rst | 9 +++++++++ lib/sqlalchemy/sql/elements.py | 3 +++ lib/sqlalchemy/sql/type_api.py | 16 +++++++++++++--- test/sql/test_operators.py | 18 ++++++++++++++++++ 4 files changed, 43 insertions(+), 3 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/10213.rst diff --git a/doc/build/changelog/unreleased_14/10213.rst b/doc/build/changelog/unreleased_14/10213.rst new file mode 100644 index 00000000000..96c17b1946f --- /dev/null +++ b/doc/build/changelog/unreleased_14/10213.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, sql + :tickets: 10213 + :versions: 2.0.20 + + Fixed issue where unpickling of a :class:`_schema.Column` or other + :class:`_sql.ColumnElement` would fail to restore the correct "comparator" + object, which is used to generate SQL expressions specific to the type + object. diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index a89273e4da7..4eac2262853 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -850,6 +850,9 @@ def comparator(self): else: return comparator_factory(self) + def __setstate__(self, state): + self.__dict__.update(state) + def __getattr__(self, key): try: return getattr(self.comparator, key) diff --git a/lib/sqlalchemy/sql/type_api.py b/lib/sqlalchemy/sql/type_api.py index b404b41a5e1..25ae7eabc23 100644 --- a/lib/sqlalchemy/sql/type_api.py +++ b/lib/sqlalchemy/sql/type_api.py @@ -111,8 +111,12 @@ def _adapt_expression(self, op, other_comparator): return op, self.type + # note: this reduce is needed for tests to pass under python 2. + # it does not appear to apply to python 3. It has however been + # modified to accommodate issue #10213. In SQLA 2 this reduce + # has been removed. 
def __reduce__(self): - return _reconstitute_comparator, (self.expr,) + return _reconstitute_comparator, (self.expr, self.expr.type) hashable = True """Flag, if False, means values from this type aren't hashable. @@ -1945,8 +1949,14 @@ def comparator_factory(self): return self.impl.comparator_factory -def _reconstitute_comparator(expression): - return expression.comparator +def _reconstitute_comparator(expression, type_=None): + # changed for #10213, added type_ argument. + # for previous pickles, keep type_ optional + if type_ is None: + return expression.comparator + + comparator_factory = type_.comparator_factory + return comparator_factory(expression) def to_instance(typeobj, *arg, **kw): diff --git a/test/sql/test_operators.py b/test/sql/test_operators.py index a03cb21fb30..fb0ecddb382 100644 --- a/test/sql/test_operators.py +++ b/test/sql/test_operators.py @@ -1,5 +1,6 @@ import datetime import operator +import pickle from sqlalchemy import and_ from sqlalchemy import between @@ -68,6 +69,7 @@ from sqlalchemy.types import Indexable from sqlalchemy.types import JSON from sqlalchemy.types import MatchType +from sqlalchemy.types import NullType from sqlalchemy.types import TypeDecorator from sqlalchemy.types import TypeEngine from sqlalchemy.types import UserDefinedType @@ -2250,6 +2252,22 @@ def test_pickle_operators_two(self): clause = tuple_(1, 2, 3) eq_(str(clause), str(util.pickle.loads(util.pickle.dumps(clause)))) + @testing.combinations(Integer(), String(), JSON(), argnames="typ") + @testing.variation("eval_first", [True, False]) + def test_pickle_comparator(self, typ, eval_first): + """test #10213""" + + table1 = Table("t", MetaData(), Column("x", typ)) + t1 = table1.c.x + + if eval_first: + t1.comparator + + t1p = pickle.loads(pickle.dumps(table1.c.x)) + + is_not(t1p.comparator.__class__, NullType.Comparator) + is_(t1.comparator.__class__, t1p.comparator.__class__) + @testing.combinations( (operator.lt, "<", ">"), (operator.gt, ">", "<"), From 
5ba341c4c6f0d7359e48568b7661c7196b67884e Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Sat, 5 Aug 2023 12:02:53 +0200 Subject: [PATCH 551/632] re-add aiomysql to the ci, remove unmaintained note References: #6893 Change-Id: Ifb70975f686eef2b7239ca266e9dbfff1f1007cb (cherry picked from commit 8bacaad859b63418c1dd6099b4a8c7f00727c23e) --- .../changelog/unreleased_14/aiomysql.rst | 6 ++++ lib/sqlalchemy/dialects/mysql/aiomysql.py | 31 ++++++++++--------- lib/sqlalchemy/dialects/mysql/asyncmy.py | 4 --- setup.cfg | 2 +- tox.ini | 3 +- 5 files changed, 25 insertions(+), 21 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/aiomysql.rst diff --git a/doc/build/changelog/unreleased_14/aiomysql.rst b/doc/build/changelog/unreleased_14/aiomysql.rst new file mode 100644 index 00000000000..ef6fc4c94f9 --- /dev/null +++ b/doc/build/changelog/unreleased_14/aiomysql.rst @@ -0,0 +1,6 @@ +.. change:: + :tags: mysql, usecase + :versions: 2.0.20 + + Updated aiomysql dialect since the dialect appears to be maintained again. + Re-added to the ci testing using version 0.2.0. diff --git a/lib/sqlalchemy/dialects/mysql/aiomysql.py b/lib/sqlalchemy/dialects/mysql/aiomysql.py index 93e500c32bf..c5a74b82844 100644 --- a/lib/sqlalchemy/dialects/mysql/aiomysql.py +++ b/lib/sqlalchemy/dialects/mysql/aiomysql.py @@ -11,13 +11,6 @@ :connectstring: mysql+aiomysql://user:password@host:port/dbname[?key=value&key=value...] :url: https://github.com/aio-libs/aiomysql -.. warning:: The aiomysql dialect is not currently tested as part of - SQLAlchemy’s continuous integration. As of September, 2021 the driver - appears to be unmaintained and no longer functions for Python version 3.10, - and additionally depends on a significantly outdated version of PyMySQL. - Please refer to the :ref:`asyncmy` dialect for current MySQL/MariaDB asyncio - functionality. - The aiomysql dialect is SQLAlchemy's second Python asyncio dialect. 
Using a special asyncio mediation layer, the aiomysql dialect is usable @@ -57,7 +50,7 @@ def __init__(self, adapt_connection): self._connection = adapt_connection._connection self.await_ = adapt_connection.await_ - cursor = self._connection.cursor() + cursor = self._connection.cursor(adapt_connection.dbapi.Cursor) # see https://github.com/aio-libs/aiomysql/issues/543 self._cursor = self.await_(cursor.__aenter__()) @@ -103,10 +96,7 @@ def executemany(self, operation, seq_of_parameters): async def _execute_async(self, operation, parameters): async with self._adapt_connection._execute_mutex: - if parameters is None: - result = await self._cursor.execute(operation) - else: - result = await self._cursor.execute(operation, parameters) + result = await self._cursor.execute(operation, parameters) if not self.server_side: # aiomysql has a "fake" async result, so we have to pull it out @@ -156,9 +146,7 @@ def __init__(self, adapt_connection): self._connection = adapt_connection._connection self.await_ = adapt_connection.await_ - cursor = self._connection.cursor( - adapt_connection.dbapi.aiomysql.SSCursor - ) + cursor = self._connection.cursor(adapt_connection.dbapi.SSCursor) self._cursor = self.await_(cursor.__aenter__()) @@ -224,6 +212,7 @@ def __init__(self, aiomysql, pymysql): self.pymysql = pymysql self.paramstyle = "format" self._init_dbapi_attributes() + self.Cursor, self.SSCursor = self._init_cursors_subclasses() def _init_dbapi_attributes(self): for name in ( @@ -265,6 +254,18 @@ def connect(self, *arg, **kw): await_only(self.aiomysql.connect(*arg, **kw)), ) + def _init_cursors_subclasses(self): + # suppress unconditional warning emitted by aiomysql + class Cursor(self.aiomysql.Cursor): + async def _show_warnings(self, conn): + pass + + class SSCursor(self.aiomysql.SSCursor): + async def _show_warnings(self, conn): + pass + + return Cursor, SSCursor + class MySQLDialect_aiomysql(MySQLDialect_pymysql): driver = "aiomysql" diff --git 
a/lib/sqlalchemy/dialects/mysql/asyncmy.py b/lib/sqlalchemy/dialects/mysql/asyncmy.py index a27f24bab9c..fc0ebe2798c 100644 --- a/lib/sqlalchemy/dialects/mysql/asyncmy.py +++ b/lib/sqlalchemy/dialects/mysql/asyncmy.py @@ -11,10 +11,6 @@ :connectstring: mysql+asyncmy://user:password@host:port/dbname[?key=value&key=value...] :url: https://github.com/long2ice/asyncmy -.. note:: The asyncmy dialect as of September, 2021 was added to provide - MySQL/MariaDB asyncio compatibility given that the :ref:`aiomysql` database - driver has become unmaintained, however asyncmy is itself very new. - Using a special asyncio mediation layer, the asyncmy dialect is usable as the backend for the :ref:`SQLAlchemy asyncio ` extension package. diff --git a/setup.cfg b/setup.cfg index b455c8a097a..2fa7b765dbb 100644 --- a/setup.cfg +++ b/setup.cfg @@ -75,7 +75,7 @@ pymysql = pymysql<1;python_version<"3" aiomysql = %(asyncio)s - aiomysql;python_version>="3" + aiomysql>=0.2.0;python_version>="3" asyncmy = %(asyncio)s asyncmy>=0.2.3,!=0.2.4;python_version>="3" diff --git a/tox.ini b/tox.ini index 092104a4485..9a198e77d76 100644 --- a/tox.ini +++ b/tox.ini @@ -34,6 +34,7 @@ deps= mysql: .[mysql] mysql: .[pymysql] mysql: .[asyncmy]; python_version >= '3' + mysql: .[aiomysql]; python_version >= '3' # mysql: .[mariadb_connector]; python_version >= '3' oracle: .[oracle] @@ -125,7 +126,7 @@ setenv= mysql: EXTRA_MYSQL_DRIVERS={env:EXTRA_MYSQL_DRIVERS:--dbdriver mysqldb --dbdriver pymysql} # py3{,7,8,9,10,11}-mysql: EXTRA_MYSQL_DRIVERS={env:EXTRA_MYSQL_DRIVERS:--dbdriver mysqldb --dbdriver pymysql --dbdriver mariadbconnector --dbdriver asyncmy} - py3{,7,8,9,10,11}-mysql: EXTRA_MYSQL_DRIVERS={env:EXTRA_MYSQL_DRIVERS:--dbdriver mysqldb --dbdriver pymysql --dbdriver asyncmy} + py3{,7,8,9,10,11}-mysql: EXTRA_MYSQL_DRIVERS={env:EXTRA_MYSQL_DRIVERS:--dbdriver mysqldb --dbdriver pymysql --dbdriver asyncmy --dbdriver aiomysql} mssql: MSSQL={env:TOX_MSSQL:--db mssql} From 
9e8b910c9a2b52de471c662caabb65e62cabf3c6 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 10 Aug 2023 18:26:45 -0400 Subject: [PATCH 552/632] safe annotate QueryableAttribute inside of join() condition Fixed fundamental issue which prevented some forms of ORM "annotations" from taking place for subqueries which made use of :meth:`_sql.Select.join` against a relationship target. These annotations are used whenever a subquery is used in special situations such as within :meth:`_orm.PropComparator.and_` and other ORM-specific scenarios. Fixes: #10223 Change-Id: I40f04265a6caa0fdcbc9f1b121a35561ab4b1fcf (cherry picked from commit 6cfdc0743b7d1ebee3582f612a4f8acaa6ab42f9) --- doc/build/changelog/unreleased_14/10223.rst | 10 ++ lib/sqlalchemy/sql/annotation.py | 18 +- test/orm/test_rel_fn.py | 22 +++ test/orm/test_relationship_criteria.py | 176 ++++++++++++++++++++ 4 files changed, 224 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/10223.rst diff --git a/doc/build/changelog/unreleased_14/10223.rst b/doc/build/changelog/unreleased_14/10223.rst new file mode 100644 index 00000000000..7c744240607 --- /dev/null +++ b/doc/build/changelog/unreleased_14/10223.rst @@ -0,0 +1,10 @@ +.. change:: + :tags: bug, orm + :tickets: 10223 + :versions: 2.0.20 + + Fixed fundamental issue which prevented some forms of ORM "annotations" + from taking place for subqueries which made use of :meth:`_sql.Select.join` + against a relationship target. These annotations are used whenever a + subquery is used in special situations such as within + :meth:`_orm.PropComparator.and_` and other ORM-specific scenarios. 
diff --git a/lib/sqlalchemy/sql/annotation.py b/lib/sqlalchemy/sql/annotation.py index 60e600ddf0b..f98038d6a21 100644 --- a/lib/sqlalchemy/sql/annotation.py +++ b/lib/sqlalchemy/sql/annotation.py @@ -242,6 +242,18 @@ def entity_namespace(self): annotated_classes = {} +def _safe_annotate(to_annotate, annotations): + try: + _annotate = to_annotate._annotate + except AttributeError: + # skip objects that don't actually have an `_annotate` + # attribute, namely QueryableAttribute inside of a join + # condition + return to_annotate + else: + return _annotate(annotations) + + def _deep_annotate( element, annotations, exclude=None, detect_subquery_cols=False ): @@ -272,9 +284,11 @@ def clone(elem, **kw): newelem = elem._clone(clone=clone, **kw) elif annotations != elem._annotations: if detect_subquery_cols and elem._is_immutable: - newelem = elem._clone(clone=clone, **kw)._annotate(annotations) + newelem = _safe_annotate( + elem._clone(clone=clone, **kw), annotations + ) else: - newelem = elem._annotate(annotations) + newelem = _safe_annotate(elem, annotations) else: newelem = elem newelem._copy_internals(clone=clone) diff --git a/test/orm/test_rel_fn.py b/test/orm/test_rel_fn.py index 4d8eb88b91c..a4e769d445d 100644 --- a/test/orm/test_rel_fn.py +++ b/test/orm/test_rel_fn.py @@ -1243,6 +1243,28 @@ def test_lazy_clause_remote_local_multiple_ref(self): class DeannotateCorrectlyTest(fixtures.TestBase): + def test_annotate_orm_join(self): + """test for #10223""" + from sqlalchemy.orm import declarative_base + + Base = declarative_base() + + class A(Base): + __tablename__ = "a" + id = Column(Integer, primary_key=True) + bs = relationship("B") + + class B(Base): + __tablename__ = "b" + id = Column(Integer, primary_key=True) + a_id = Column(ForeignKey(A.id)) + + stmt = select(A).join(A.bs) + + from sqlalchemy.sql import util + + util._deep_annotate(stmt, {"foo": "bar"}) + def test_pj_deannotates(self): from sqlalchemy.orm import declarative_base diff --git 
a/test/orm/test_relationship_criteria.py b/test/orm/test_relationship_criteria.py index e866fe01862..e1dc0ae29e9 100644 --- a/test/orm/test_relationship_criteria.py +++ b/test/orm/test_relationship_criteria.py @@ -1,6 +1,7 @@ import datetime import random +from sqlalchemy import Boolean from sqlalchemy import Column from sqlalchemy import DateTime from sqlalchemy import event @@ -13,9 +14,11 @@ from sqlalchemy import select from sqlalchemy import sql from sqlalchemy import String +from sqlalchemy import Table from sqlalchemy import testing from sqlalchemy.orm import aliased from sqlalchemy.orm import column_property +from sqlalchemy.orm import contains_eager from sqlalchemy.orm import defer from sqlalchemy.orm import join as orm_join from sqlalchemy.orm import joinedload @@ -29,6 +32,7 @@ from sqlalchemy.orm.decl_api import declared_attr from sqlalchemy.testing import eq_ from sqlalchemy.testing import expect_raises_message +from sqlalchemy.testing import fixtures from sqlalchemy.testing.assertions import expect_raises from sqlalchemy.testing.assertsql import CompiledSQL from sqlalchemy.testing.fixtures import fixture_session @@ -2137,3 +2141,175 @@ def test_select_joinm2m_aliased_local_criteria(self, order_item_fixture): "JOIN items AS items_1 ON items_1.id = order_items_1.item_id " "AND items_1.description != :description_1", ) + + +class SubqueryCriteriaTest(fixtures.DeclarativeMappedTest): + """test #10223""" + + @classmethod + def setup_classes(cls): + Base = cls.DeclarativeBasic + + class Temperature(Base): + __tablename__ = "temperature" + id = Column(Integer, primary_key=True) + pointless_flag = Column(Boolean) + + class Color(Base): + __tablename__ = "color" + id = Column(Integer, primary_key=True) + name = Column(String(10)) + temperature_id = Column(ForeignKey("temperature.id")) + temperature = relationship("Temperature") + + room_connections = Table( + "room_connections", + Base.metadata, + Column( + "room_a_id", + Integer, + # mariadb does not like 
this FK constraint + # ForeignKey("room.id"), + primary_key=True, + ), + Column( + "room_b_id", + Integer, + # mariadb does not like this FK constraint + # ForeignKey("room.id"), + primary_key=True, + ), + ) + + class Room(Base): + __tablename__ = "room" + id = Column(Integer, primary_key=True) + token = Column(String(10)) + color_id = Column(ForeignKey("color.id")) + color = relationship("Color") + connected_rooms = relationship( + "Room", + secondary=room_connections, + primaryjoin=id == room_connections.c.room_a_id, + secondaryjoin=id == room_connections.c.room_b_id, + ) + + @classmethod + def insert_data(cls, connection): + Room, Temperature, Color = cls.classes("Room", "Temperature", "Color") + with Session(connection) as session: + warm = Temperature(pointless_flag=True) + cool = Temperature(pointless_flag=True) + session.add_all([warm, cool]) + + red = Color(name="red", temperature=warm) + orange = Color(name="orange", temperature=warm) + blue = Color(name="blue", temperature=cool) + green = Color(name="green", temperature=cool) + session.add_all([red, orange, blue, green]) + + red1 = Room(token="Red-1", color=red) + red2 = Room(token="Red-2", color=red) + orange2 = Room(token="Orange-2", color=orange) + blue1 = Room(token="Blue-1", color=blue) + blue2 = Room(token="Blue-2", color=blue) + green1 = Room(token="Green-1", color=green) + red1.connected_rooms = [red2, blue1, green1] + red2.connected_rooms = [red1, blue2, orange2] + blue1.connected_rooms = [red1, blue2, green1] + blue2.connected_rooms = [red2, blue1, orange2] + session.add_all([red1, red2, blue1, blue2, green1, orange2]) + + session.commit() + + @testing.variation( + "join_on_relationship", ["alone", "with_and", "no", "omit"] + ) + def test_selectinload(self, join_on_relationship): + Room, Temperature, Color = self.classes("Room", "Temperature", "Color") + similar_color = aliased(Color) + subquery = ( + select(Color.id) + .join( + similar_color, + similar_color.temperature_id == 
Color.temperature_id, + ) + .where(similar_color.name == "red") + ) + + if join_on_relationship.alone: + subquery = subquery.join(Color.temperature).where( + Temperature.pointless_flag == True + ) + elif join_on_relationship.with_and: + subquery = subquery.join( + Color.temperature.and_(Temperature.pointless_flag == True) + ) + elif join_on_relationship.no: + subquery = subquery.join( + Temperature, Color.temperature_id == Temperature.id + ).where(Temperature.pointless_flag == True) + elif join_on_relationship.omit: + pass + else: + join_on_relationship.fail() + + session = fixture_session() + room_result = session.scalars( + select(Room) + .order_by(Room.id) + .join(Room.color.and_(Color.name == "red")) + .options( + selectinload( + Room.connected_rooms.and_(Room.color_id.in_(subquery)) + ) + ) + ).unique() + + self._assert_result(room_result) + + def test_contains_eager(self): + Room, Temperature, Color = self.classes("Room", "Temperature", "Color") + similar_color = aliased(Color) + subquery = ( + select(Color.id) + .join( + similar_color, + similar_color.temperature_id == Color.temperature_id, + ) + .join(Color.temperature.and_(Temperature.pointless_flag == True)) + .where(similar_color.name == "red") + ) + + room_alias = aliased(Room) + session = fixture_session() + + room_result = session.scalars( + select(Room) + .order_by(Room.id) + .join(Room.color.and_(Color.name == "red")) + .join( + room_alias, + Room.connected_rooms.of_type(room_alias).and_( + room_alias.color_id.in_(subquery) + ), + ) + .options(contains_eager(Room.connected_rooms.of_type(room_alias))) + ).unique() + + self._assert_result(room_result) + + def _assert_result(self, room_result): + eq_( + [ + ( + each_room.token, + [room.token for room in each_room.connected_rooms], + ) + for each_room in room_result + ], + [ + ("Red-1", ["Red-2"]), + ("Red-2", ["Red-1", "Orange-2"]), + ], + ) From 8301ee6171843f5ce91ac1e1c7565cd8f4ef629c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 14 Aug 2023 
14:20:12 -0400 Subject: [PATCH 553/632] update CI supported DB versions Change-Id: I6f7991242eebc2b25df96e6779881882d29eb7a1 (cherry picked from commit 41cb8f7c05c51b9ceb28dab16d348652e4bc7d82) --- lib/sqlalchemy/dialects/mysql/base.py | 2 +- lib/sqlalchemy/dialects/oracle/base.py | 2 +- lib/sqlalchemy/dialects/postgresql/base.py | 2 +- lib/sqlalchemy/dialects/sqlite/base.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index 73cb1ac09a6..1f08495ced3 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -9,7 +9,7 @@ .. dialect:: mysql :name: MySQL / MariaDB - :full_support: 5.6, 5.7, 8.0 / 10.4, 10.5 + :full_support: 5.6, 5.7, 8.0 / 10.8, 10.9 :normal_support: 5.6+ / 10+ :best_effort: 5.0.2+ / 5.0.2+ diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py index 1b8540b8ef4..6e72ebe8b5b 100644 --- a/lib/sqlalchemy/dialects/oracle/base.py +++ b/lib/sqlalchemy/dialects/oracle/base.py @@ -8,7 +8,7 @@ r""" .. dialect:: oracle :name: Oracle - :full_support: 11.2, 18c + :full_support: 18c :normal_support: 11+ :best_effort: 8+ diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index a73569b1a7f..f4f0d3a62e3 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -8,7 +8,7 @@ r""" .. dialect:: postgresql :name: PostgreSQL - :full_support: 9.6, 10, 11, 12, 13, 14 + :full_support: 12, 13, 14, 15 :normal_support: 9.6+ :best_effort: 8+ diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index 61a0a97df19..ea91a322807 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -8,7 +8,7 @@ r""" .. 
dialect:: sqlite :name: SQLite - :full_support: 3.21, 3.28+ + :full_support: 3.36.0 :normal_support: 3.12+ :best_effort: 3.7.16+ From 1cba74c47feb30875ed8ca5891b95c789e2b186f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 15 Aug 2023 09:41:50 -0400 Subject: [PATCH 554/632] fix test for mysql Change-Id: Ib05d950a4284412d2daf9b315314c46a70d9cdc7 --- test/orm/test_relationship_criteria.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/orm/test_relationship_criteria.py b/test/orm/test_relationship_criteria.py index e1dc0ae29e9..d93f1fc8f30 100644 --- a/test/orm/test_relationship_criteria.py +++ b/test/orm/test_relationship_criteria.py @@ -2286,7 +2286,7 @@ def test_contains_eager(self): room_result = session.scalars( select(Room) - .order_by(Room.id) + .order_by(Room.id, room_alias.id) .join(Room.color.and_(Color.name == "red")) .join( room_alias, From 0cc93aa994eee8dddef82b26f0ddf2f3541cea77 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Wed, 9 Aug 2023 23:27:21 +0200 Subject: [PATCH 555/632] Fix rendering of order in sequences and identity columns. Fixes the rendering of the Oracle only ``order`` attribute in Sequence and Identity that was passed also when rendering the DDL in PostgreSQL. 
Fixes: #10207 Change-Id: I5b918eab38ba68fa10a213a79e2bd0cc48401a02 (cherry picked from commit 5615ab52c81e2343330069f91ec3544840519956) --- doc/build/changelog/unreleased_14/10207.rst | 12 ++++++++++++ lib/sqlalchemy/dialects/oracle/base.py | 5 +++-- lib/sqlalchemy/sql/compiler.py | 2 -- test/dialect/oracle/test_compiler.py | 2 +- test/sql/test_identity_column.py | 17 ++++++----------- test/sql/test_sequences.py | 12 ++++++++++-- 6 files changed, 32 insertions(+), 18 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/10207.rst diff --git a/doc/build/changelog/unreleased_14/10207.rst b/doc/build/changelog/unreleased_14/10207.rst new file mode 100644 index 00000000000..aef31e6a42f --- /dev/null +++ b/doc/build/changelog/unreleased_14/10207.rst @@ -0,0 +1,12 @@ +.. change:: + :tags: schema, bug + :tickets: 10207 + :versions: 2.0.21 + + Modified the rendering of the Oracle only :paramref:`.Identity.order` + parameter that's part of both :class:`.Sequence` and :class:`.Identity` to + only take place for the Oracle backend, and not other backends such as that + of PostgreSQL. A future release will rename the + :paramref:`.Identity.order`, :paramref:`.Sequence.order` and + :paramref:`.Identity.on_null` parameters to Oracle-specific names, + deprecating the old names, these parameters only apply to Oracle. 
diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py index 6e72ebe8b5b..2e49b202c6d 100644 --- a/lib/sqlalchemy/dialects/oracle/base.py +++ b/lib/sqlalchemy/dialects/oracle/base.py @@ -1381,8 +1381,9 @@ def get_identity_options(self, identity_options): text = text.replace("NO MINVALUE", "NOMINVALUE") text = text.replace("NO MAXVALUE", "NOMAXVALUE") text = text.replace("NO CYCLE", "NOCYCLE") - text = text.replace("NO ORDER", "NOORDER") - return text + if identity_options.order is not None: + text += " ORDER" if identity_options.order else " NOORDER" + return text.strip() def visit_computed_column(self, generated): text = "GENERATED ALWAYS AS (%s)" % self.sql_compiler.process( diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 0a460b8c091..e72e2f8c045 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -4790,8 +4790,6 @@ def get_identity_options(self, identity_options): text.append("NO MAXVALUE") if identity_options.cache is not None: text.append("CACHE %d" % identity_options.cache) - if identity_options.order is not None: - text.append("ORDER" if identity_options.order else "NO ORDER") if identity_options.cycle is not None: text.append("CYCLE" if identity_options.cycle else "NO CYCLE") return " ".join(text) diff --git a/test/dialect/oracle/test_compiler.py b/test/dialect/oracle/test_compiler.py index 6c3e0fb706b..737c11f4680 100644 --- a/test/dialect/oracle/test_compiler.py +++ b/test/dialect/oracle/test_compiler.py @@ -1373,7 +1373,7 @@ def test_column_identity(self): schema.CreateTable(t), "CREATE TABLE t (y INTEGER GENERATED ALWAYS AS IDENTITY " "(INCREMENT BY 7 START WITH 4 NOMINVALUE NOMAXVALUE " - "NOORDER NOCYCLE))", + "NOCYCLE NOORDER))", ) def test_column_identity_no_generated(self): diff --git a/test/sql/test_identity_column.py b/test/sql/test_identity_column.py index 00404dae791..a93c5e6c507 100644 --- a/test/sql/test_identity_column.py +++ 
b/test/sql/test_identity_column.py @@ -55,14 +55,10 @@ class _IdentityDDLFixture(testing.AssertsCompiledSQL): "ALWAYS AS IDENTITY (START WITH 1 MAXVALUE 10 CYCLE)", ), ( - dict(always=False, cache=1000, order=True), - "BY DEFAULT AS IDENTITY (CACHE 1000 ORDER)", - ), - (dict(order=True, cycle=True), "BY DEFAULT AS IDENTITY (ORDER CYCLE)"), - ( - dict(order=False, cycle=False), - "BY DEFAULT AS IDENTITY (NO ORDER NO CYCLE)", + dict(always=False, cache=1000, cycle=False), + "BY DEFAULT AS IDENTITY (CACHE 1000 NO CYCLE)", ), + (dict(cycle=True), "BY DEFAULT AS IDENTITY (CYCLE)"), ) def test_create_ddl(self, identity_args, text): @@ -72,7 +68,6 @@ def test_create_ddl(self, identity_args, text): text = text.replace("NO MINVALUE", "NOMINVALUE") text = text.replace("NO MAXVALUE", "NOMAXVALUE") text = text.replace("NO CYCLE", "NOCYCLE") - text = text.replace("NO ORDER", "NOORDER") t = Table( "foo_table", @@ -170,7 +165,7 @@ def test_on_null(self): Column( "foo", Integer(), - Identity(always=False, on_null=True, start=42, order=True), + Identity(always=False, on_null=True, start=42, cycle=True), ), ) text = " ON NULL" if testing.against("oracle") else "" @@ -179,7 +174,7 @@ def test_on_null(self): ( "CREATE TABLE foo_table (foo INTEGER GENERATED BY DEFAULT" + text - + " AS IDENTITY (START WITH 42 ORDER))" + + " AS IDENTITY (START WITH 42 CYCLE))" ), ) @@ -268,7 +263,7 @@ def fn(**kwargs): assert_raises_message(ArgumentError, text, fn, server_onupdate="42") def test_to_metadata(self): - identity1 = Identity("by default", on_null=True, start=123) + identity1 = Identity("by default", cycle=True, start=123) m = MetaData() t = Table( "t", m, Column("x", Integer), Column("y", Integer, identity1) diff --git a/test/sql/test_sequences.py b/test/sql/test_sequences.py index a0fef99be31..5766b724e54 100644 --- a/test/sql/test_sequences.py +++ b/test/sql/test_sequences.py @@ -84,13 +84,21 @@ def test_create_drop_ddl(self): ) self.assert_compile( - CreateSequence(Sequence("foo_seq", 
cache=1000, order=True)), - "CREATE SEQUENCE foo_seq START WITH 1 CACHE 1000 ORDER", + CreateSequence(Sequence("foo_seq", cache=1000)), + "CREATE SEQUENCE foo_seq START WITH 1 CACHE 1000", ) + # remove this when the `order` parameter is removed + # issue #10207 - ensure ORDER does not render + self.assert_compile( + CreateSequence(Sequence("foo_seq", order=True)), + "CREATE SEQUENCE foo_seq START WITH 1", + ) + # only renders for Oracle self.assert_compile( CreateSequence(Sequence("foo_seq", order=True)), "CREATE SEQUENCE foo_seq START WITH 1 ORDER", + dialect="oracle", ) self.assert_compile( From 3437606a6bec36265e4e1c1625354a1703837bb0 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 7 Sep 2023 18:28:32 -0400 Subject: [PATCH 556/632] remove ancient engine.execute() reference from main engine doc Change-Id: I51a30c2e53ad7da3e1209b5623388dd2aea7589c (cherry picked from commit c16ed9dd24a091dbaba4683ae2f04a3f199416bf) --- doc/build/core/engines.rst | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/doc/build/core/engines.rst b/doc/build/core/engines.rst index b0dc2a3cf59..d52ad6b13c1 100644 --- a/doc/build/core/engines.rst +++ b/doc/build/core/engines.rst @@ -26,13 +26,16 @@ Creating an engine is just a matter of issuing a single call, engine = create_engine("postgresql://scott:tiger@localhost:5432/mydatabase") The above engine creates a :class:`.Dialect` object tailored towards -PostgreSQL, as well as a :class:`_pool.Pool` object which will establish a DBAPI -connection at ``localhost:5432`` when a connection request is first received. -Note that the :class:`_engine.Engine` and its underlying :class:`_pool.Pool` do **not** -establish the first actual DBAPI connection until the :meth:`_engine.Engine.connect` -method is called, or an operation which is dependent on this method such as -:meth:`_engine.Engine.execute` is invoked. 
In this way, :class:`_engine.Engine` and -:class:`_pool.Pool` can be said to have a *lazy initialization* behavior. +PostgreSQL, as well as a :class:`_pool.Pool` object which will establish a +DBAPI connection at ``localhost:5432`` when a connection request is first +received. Note that the :class:`_engine.Engine` and its underlying +:class:`_pool.Pool` do **not** establish the first actual DBAPI connection +until the :meth:`_engine.Engine.connect` or :meth:`_engine.Engine.begin` +methods are called. Either of these methods may also be invoked by other +SQLAlchemy :class:`_engine.Engine` dependent objects such as the ORM +:class:`_orm.Session` object when they first require database connectivity. +In this way, :class:`_engine.Engine` and :class:`_pool.Pool` can be said to +have a *lazy initialization* behavior. The :class:`_engine.Engine`, once created, can either be used directly to interact with the database, or can be passed to a :class:`.Session` object to work with the ORM. This section From 836e5a4b2249e41b96c27ac27dac5c56618be860 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Thu, 5 Oct 2023 21:16:14 +0200 Subject: [PATCH 557/632] update pipelines to include 3.12 Change-Id: I894a03089e4b7eedb3e42de9554c19f9ffea8ce1 --- .github/workflows/create-wheels.yaml | 23 ++++++++++++++------- .github/workflows/run-on-pr.yaml | 11 +++------- .github/workflows/run-test.yaml | 31 +++++++++++----------------- 3 files changed, 30 insertions(+), 35 deletions(-) diff --git a/.github/workflows/create-wheels.yaml b/.github/workflows/create-wheels.yaml index a00465d9fce..101f6af162b 100644 --- a/.github/workflows/create-wheels.yaml +++ b/.github/workflows/create-wheels.yaml @@ -28,7 +28,8 @@ jobs: - "3.8" - "3.9" - "3.10" - - "3.11.0-rc - 3.11" + - "3.11" + - "3.12" architecture: - x64 - x86 @@ -139,6 +140,7 @@ jobs: - cp39-cp39 - cp310-cp310 - cp311-cp311 + - cp312-cp312 architecture: - x64 @@ -167,6 +169,8 @@ jobs: python-version: cp310-cp310 - os: "ubuntu-20.04" 
python-version: cp311-cp311 + - os: "ubuntu-20.04" + python-version: cp312-cp312 fail-fast: false @@ -200,11 +204,11 @@ jobs: (cat setup.cfg) | %{$_ -replace "tag_build.?=.?dev",""} | set-content setup.cfg - name: Create wheel for manylinux1 and manylinux2010 for py3 - if: ${{ matrix.python-version != 'cp27-cp27m' && matrix.python-version != 'cp27-cp27mu' && matrix.python-version != 'cp311-cp311' }} + if: ${{ matrix.python-version != 'cp27-cp27m' && matrix.python-version != 'cp27-cp27mu' && matrix.python-version != 'cp311-cp311' && matrix.python-version != 'cp312-cp312' }} # this step uses the image provided by pypa here https://github.com/pypa/manylinux to generate the wheels on linux # the action uses the image for manylinux2010 but can generate also a manylinux1 wheel # change the tag of this image to change the image used - uses: RalfG/python-wheels-manylinux-build@v0.5.0-manylinux2010_x86_64 + uses: RalfG/python-wheels-manylinux-build@v0.7.1-manylinux2010_x86_64 # this action generates 3 wheels in dist/. linux, manylinux1 and manylinux2010 with: # python-versions is the output of the previous step and is in the form -. Eg cp27-cp27mu @@ -220,7 +224,7 @@ jobs: # this step uses the image provided by pypa here https://github.com/pypa/manylinux to generate the wheels on linux # the action uses the image for manylinux2010 but can generate also a manylinux1 wheel # change the tag of this image to change the image used - uses: RalfG/python-wheels-manylinux-build@v0.5.0-manylinux2014_x86_64 + uses: RalfG/python-wheels-manylinux-build@v0.7.1-manylinux2014_x86_64 # this action generates 2 wheels in dist/. linux and manylinux2014 with: # python-versions is the output of the previous step and is in the form -. 
Eg cp27-cp27mu @@ -236,7 +240,7 @@ jobs: # this step uses the image provided by pypa here https://github.com/pypa/manylinux to generate the wheels on linux # the action uses the image for manylinux2010 but can generate also a manylinux1 wheel # change the tag of this image to change the image used - uses: RalfG/python-wheels-manylinux-build@v0.5.0-manylinux1_x86_64 + uses: RalfG/python-wheels-manylinux-build@v0.7.1-manylinux1_x86_64 # this action generates 2 wheels in dist/. linux and manylinux1 with: # python-versions is the output of the previous step and is in the form -. Eg cp27-cp27mu @@ -301,7 +305,7 @@ jobs: # twine on py2 is very old and is no longer updated, so we change to python 3.8 before upload uses: actions/setup-python@v4 with: - python-version: "3.8" + python-version: "3.11" - name: Publish wheel # the action https://github.com/marketplace/actions/pypi-publish runs only on linux and we cannot specify @@ -334,6 +338,7 @@ jobs: - cp39-cp39 - cp310-cp310 - cp311-cp311 + - cp312-cp312 exclude: # ubuntu-latest does not have: py27, py36 - os: "ubuntu-latest" @@ -349,6 +354,8 @@ jobs: python-version: cp310-cp310 - os: "ubuntu-20.04" python-version: cp311-cp311 + - os: "ubuntu-20.04" + python-version: cp312-cp312 fail-fast: false @@ -377,7 +384,7 @@ jobs: # this step uses the image provided by pypa here https://github.com/pypa/manylinux to generate the wheels on linux # the action uses the image for manylinux2014 but can generate also a manylinux1 wheel # change the tag of this image to change the image used - uses: RalfG/python-wheels-manylinux-build@v0.5.0-manylinux2014_aarch64 + uses: RalfG/python-wheels-manylinux-build@v0.7.1-manylinux2014_aarch64 # this action generates 2 wheels in dist/. linux and manylinux2014 with: # python-versions is the output of the previous step and is in the form -. 
Eg cp37-cp37mu @@ -415,7 +422,7 @@ jobs: # twine on py2 is very old and is no longer updated, so we change to python 3.8 before upload uses: actions/setup-python@v4 with: - python-version: "3.8" + python-version: "3.11" - name: Publish wheel # the action https://github.com/marketplace/actions/pypi-publish runs only on linux and we cannot specify diff --git a/.github/workflows/run-on-pr.yaml b/.github/workflows/run-on-pr.yaml index 9a944b01391..ab51b357b44 100644 --- a/.github/workflows/run-on-pr.yaml +++ b/.github/workflows/run-on-pr.yaml @@ -24,18 +24,13 @@ jobs: matrix: os: - "ubuntu-latest" - - "ubuntu-20.04" python-version: - - "3.10" + - "3.11" build-type: - "cext" - "nocext" architecture: - x64 - exclude: - # ubuntu-20.04 does not need to test what ubuntu-latest supports - - os: "ubuntu-20.04" - python-version: "3.10" # abort all jobs as soon as one fails fail-fast: true @@ -69,7 +64,7 @@ jobs: os: - "ubuntu-latest" python-version: - - "3.10" + - "3.11" fail-fast: false @@ -102,7 +97,7 @@ jobs: os: - "ubuntu-latest" python-version: - - "3.10" + - "3.11" fail-fast: false diff --git a/.github/workflows/run-test.yaml b/.github/workflows/run-test.yaml index bea6fd90cfc..c015ba4e947 100644 --- a/.github/workflows/run-test.yaml +++ b/.github/workflows/run-test.yaml @@ -27,6 +27,7 @@ jobs: # run this job using this matrix, excluding some combinations below. 
matrix: os: + - "ubuntu-20.04" - "ubuntu-latest" - "windows-latest" - "macos-latest" @@ -36,7 +37,8 @@ jobs: - "3.8" - "3.9" - "3.10" - - "3.11.0-rc - 3.11" + - "3.11" + - "3.12" # waiting on https://foss.heptapod.net/pypy/pypy/-/issues/3690 # which also seems to be in 3.9 # - "pypy-3.9" @@ -76,7 +78,9 @@ jobs: - os: "ubuntu-20.04" python-version: "3.10" - os: "ubuntu-20.04" - python-version: "3.11.0-rc - 3.11" + python-version: "3.11" + - os: "ubuntu-20.04" + python-version: "3.12" # pypy does not have cext # - python-version: "pypy-3.9" # build-type: "cext" @@ -154,6 +158,7 @@ jobs: - cp39-cp39 - cp310-cp310 - cp311-cp311 + - cp312-cp312 build-type: - "cext" - "nocext" @@ -204,29 +209,17 @@ jobs: matrix: os: - "ubuntu-latest" - - "ubuntu-20.04" python-version: - - "3.6" - "3.7" - "3.8" - "3.9" - "3.10" - - "3.11.0-rc - 3.11" - exclude: + - "3.11" + - "3.12" + include: # ubuntu-latest does not have: py27, py36 - - os: "ubuntu-latest" - python-version: "3.6" - # ubuntu-20.04 does not need to test what ubuntu-latest supports - os: "ubuntu-20.04" - python-version: "3.7" - - os: "ubuntu-20.04" - python-version: "3.8" - - os: "ubuntu-20.04" - python-version: "3.9" - - os: "ubuntu-20.04" - python-version: "3.10" - - os: "ubuntu-20.04" - python-version: "3.11.0-rc - 3.11" + python-version: "3.6" fail-fast: false # steps to run in each job. 
Some are github actions, others run shell commands @@ -257,7 +250,7 @@ jobs: os: - "ubuntu-latest" python-version: - - "3.10" + - "3.11" fail-fast: false From 3ac3f074266f85694de8b2a73d2f93968554004f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 19 Oct 2023 09:59:34 -0400 Subject: [PATCH 558/632] fix 1.4 test suite add some recent fixes so we can get new 1.4 fixes merged Change-Id: I692c8adb6b22f10629b9d797c1a64334d43f13b3 --- test/dialect/postgresql/test_reflection.py | 17 +++++++++++------ test/orm/inheritance/test_basic.py | 1 + 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/test/dialect/postgresql/test_reflection.py b/test/dialect/postgresql/test_reflection.py index f33b3bde454..807ea128198 100644 --- a/test/dialect/postgresql/test_reflection.py +++ b/test/dialect/postgresql/test_reflection.py @@ -303,13 +303,18 @@ def test_get_view_names_empty(self, connection): def test_get_view_definition(self, connection): insp = inspect(connection) + + def normalize(definition): + # pg16 returns "SELECT" without qualifying tablename. 
+ # older pgs include it + definition = re.sub( + r"testtable\.(\w+)", lambda m: m.group(1), definition + ) + return re.sub(r"[\n\t ]+", " ", definition.strip()) + eq_( - re.sub( - r"[\n\t ]+", - " ", - insp.get_view_definition("test_mview").strip(), - ), - "SELECT testtable.id, testtable.data FROM testtable;", + normalize(insp.get_view_definition("test_mview")), + "SELECT id, data FROM testtable;", ) diff --git a/test/orm/inheritance/test_basic.py b/test/orm/inheritance/test_basic.py index 9daafb7cefb..e2348bb8a49 100644 --- a/test/orm/inheritance/test_basic.py +++ b/test/orm/inheritance/test_basic.py @@ -171,6 +171,7 @@ def test_group_by(self): rows = ( s.query(B.id.expressions[0], B.id.expressions[1], func.sum(B.data)) .group_by(*B.id.expressions) + .order_by(B.id.expressions[0]) .all() ) eq_(rows, [(1, 1, 5), (2, 2, 7)]) From f3e0ee079abfcb4bf6b75d48982b824e759d859f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 19 Oct 2023 11:21:26 -0400 Subject: [PATCH 559/632] dont mis-render value from previous loop iteration Fixed issue where using the same bound parameter more than once with ``literal_execute=True`` in some combinations with other literal rendering parameters would cause the wrong values to render due to an iteration issue. Fixes: #10142 Change-Id: Idde314006568e3445558f0104aed9d2f4af72b56 (cherry picked from commit 9fe7c291921540df9173820d3a06b949d7a3d949) --- doc/build/changelog/unreleased_14/10142.rst | 9 +++++++++ lib/sqlalchemy/sql/compiler.py | 14 ++++++-------- test/sql/test_compiler.py | 15 +++++++++++++++ 3 files changed, 30 insertions(+), 8 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/10142.rst diff --git a/doc/build/changelog/unreleased_14/10142.rst b/doc/build/changelog/unreleased_14/10142.rst new file mode 100644 index 00000000000..91643c69ecf --- /dev/null +++ b/doc/build/changelog/unreleased_14/10142.rst @@ -0,0 +1,9 @@ +.. 
change:: + :tags: bug, sql + :tickets: 10142 + :versions: 2.0.23 + + Fixed issue where using the same bound parameter more than once with + ``literal_execute=True`` in some combinations with other literal rendering + parameters would cause the wrong values to render due to an iteration + issue. diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index e72e2f8c045..1a71c4a4f94 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -1218,14 +1218,12 @@ def _process_parameters_for_postcompile( parameter = self.binds[name] if parameter in self.literal_execute_params: if escaped_name not in replacement_expressions: - value = parameters.pop(name) - - replacement_expressions[ - escaped_name - ] = self.render_literal_bindparam( - parameter, - render_literal_value=value, - ) + replacement_expressions[ + escaped_name + ] = self.render_literal_bindparam( + parameter, + render_literal_value=parameters.pop(escaped_name), + ) continue if parameter in self.post_compile_params: diff --git a/test/sql/test_compiler.py b/test/sql/test_compiler.py index 79826d2fb8d..11946513753 100644 --- a/test/sql/test_compiler.py +++ b/test/sql/test_compiler.py @@ -4285,6 +4285,21 @@ def test_construct_params_w_bind_clones_pre(self): {"myid_1": 20, "myid_2": 18}, ) + @testing.combinations("default", "default_qmark", argnames="dialect") + def test_literal_execute_combinations(self, dialect): + """test #10142""" + + a = bindparam("a", value="abc", literal_execute=True) + b = bindparam("b", value="def", literal_execute=True) + c = bindparam("c", value="ghi", literal_execute=True) + self.assert_compile( + select(a, b, a, c), + "SELECT 'abc' AS anon_1, 'def' AS anon_2, 'abc' AS anon__1, " + "'ghi' AS anon_3", + render_postcompile=True, + dialect=dialect, + ) + def test_tuple_expanding_in_no_values(self): expr = tuple_(table1.c.myid, table1.c.name).in_( [(1, "foo"), (5, "bar")] From e15323ccf6de6b3820c02c8c947b876ab399aa14 Mon Sep 17 00:00:00 2001 
From: Mike Bayer Date: Tue, 17 Oct 2023 18:54:23 -0400 Subject: [PATCH 560/632] revise argument to mysqlclient/pymysql ping Repaired a new incompatibility in the MySQL "pre-ping" routine where the ``False`` argument passed to ``connection.ping()``, which is intended to disable an unwanted "automatic reconnect" feature, is being deprecated in MySQL drivers and backends, and is producing warnings for some versions of MySQL's native client drivers. It's removed for mysqlclient, whereas for PyMySQL and drivers based on PyMySQL, the parameter will be deprecated and removed at some point, so API introspection is used to future proof against these various stages of removal. Fixes: #10492 Change-Id: I8a52428c6f93a03b66a605cb0b85cc5924803d6d references: #10489 (cherry picked from commit 0790c612b3d13761b04b55a5fdd7f6affd852320) --- doc/build/changelog/unreleased_14/10492.rst | 13 ++++++++ lib/sqlalchemy/dialects/mysql/mysqldb.py | 5 ++- lib/sqlalchemy/dialects/mysql/pymysql.py | 34 +++++++++++++++++++++ 3 files changed, 51 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/10492.rst diff --git a/doc/build/changelog/unreleased_14/10492.rst b/doc/build/changelog/unreleased_14/10492.rst new file mode 100644 index 00000000000..8ddf5738b69 --- /dev/null +++ b/doc/build/changelog/unreleased_14/10492.rst @@ -0,0 +1,13 @@ +.. change:: + :tags: bug, mysql + :tickets: 10492 + :versions: 2.0.23 + + Repaired a new incompatibility in the MySQL "pre-ping" routine where the + ``False`` argument passed to ``connection.ping()``, which is intended to + disable an unwanted "automatic reconnect" feature, is being deprecated in + MySQL drivers and backends, and is producing warnings for some versions of + MySQL's native client drivers. It's removed for mysqlclient, whereas for + PyMySQL and drivers based on PyMySQL, the parameter will be deprecated and + removed at some point, so API introspection is used to future proof against + these various stages of removal. 
diff --git a/lib/sqlalchemy/dialects/mysql/mysqldb.py b/lib/sqlalchemy/dialects/mysql/mysqldb.py index 7eef5185499..ad442862385 100644 --- a/lib/sqlalchemy/dialects/mysql/mysqldb.py +++ b/lib/sqlalchemy/dialects/mysql/mysqldb.py @@ -165,9 +165,12 @@ def on_connect(conn): return on_connect + def _ping_impl(self, dbapi_connection): + return dbapi_connection.ping() + def do_ping(self, dbapi_connection): try: - dbapi_connection.ping(False) + self._ping_impl(dbapi_connection) except self.dbapi.Error as err: if self.is_disconnect(err, dbapi_connection, None): return False diff --git a/lib/sqlalchemy/dialects/mysql/pymysql.py b/lib/sqlalchemy/dialects/mysql/pymysql.py index 3a776f8775e..26a03fa4c95 100644 --- a/lib/sqlalchemy/dialects/mysql/pymysql.py +++ b/lib/sqlalchemy/dialects/mysql/pymysql.py @@ -79,6 +79,40 @@ def supports_server_side_cursors(self): def dbapi(cls): return __import__("pymysql") + @langhelpers.memoized_property + def _send_false_to_ping(self): + """determine if pymysql has deprecated, changed the default of, + or removed the 'reconnect' argument of connection.ping(). + + See #10492 and + https://github.com/PyMySQL/mysqlclient/discussions/651#discussioncomment-7308971 + for background. 
+ + """ # noqa: E501 + + try: + Connection = __import__("pymysql.connections").Connection + except (ImportError, AttributeError): + return True + else: + insp = langhelpers.get_callable_argspec(Connection.ping) + try: + reconnect_arg = insp.args[1] + except IndexError: + return False + else: + return reconnect_arg == "reconnect" and ( + not insp.defaults or insp.defaults[0] is not False + ) + + def _ping_impl(self, dbapi_connection): + if self._send_false_to_ping: + dbapi_connection.ping(False) + else: + dbapi_connection.ping() + + return True + def create_connect_args(self, url, _translate_args=None): if _translate_args is None: _translate_args = dict(username="user") From 9ea0bb11d9bb947c525c6c6ecc0320c47e93d893 Mon Sep 17 00:00:00 2001 From: Gord Thompson Date: Fri, 20 Oct 2023 08:19:42 -0600 Subject: [PATCH 561/632] Fix identity column reflection failure Fixes: #10504 Fix reflection failure for bigint identity column with a large identity start value (more than 18 digits). Change-Id: I8a7ec114e4596b1710d789a4a4fb08013edd80ce (cherry picked from commit 4c46ed6a9f6f93abd5abe5ba4b95c4c1e8f52a4c) --- doc/build/changelog/unreleased_14/10504.rst | 8 ++++++++ lib/sqlalchemy/dialects/mssql/information_schema.py | 2 +- test/dialect/mssql/test_reflection.py | 11 +++++++++-- 3 files changed, 18 insertions(+), 3 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/10504.rst diff --git a/doc/build/changelog/unreleased_14/10504.rst b/doc/build/changelog/unreleased_14/10504.rst new file mode 100644 index 00000000000..7afc00f6673 --- /dev/null +++ b/doc/build/changelog/unreleased_14/10504.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, mssql, reflection + :tickets: 10504 + :versions: 2.0.23 + + Fixed issue where identity column reflection would fail + for a bigint column with a large identity start value + (more than 18 digits). 
diff --git a/lib/sqlalchemy/dialects/mssql/information_schema.py b/lib/sqlalchemy/dialects/mssql/information_schema.py index 8ca95d79b5b..998757c1708 100644 --- a/lib/sqlalchemy/dialects/mssql/information_schema.py +++ b/lib/sqlalchemy/dialects/mssql/information_schema.py @@ -215,7 +215,7 @@ class IdentitySqlVariant(TypeDecorator): cache_ok = True def column_expression(self, colexpr): - return cast(colexpr, Numeric) + return cast(colexpr, Numeric(38, 0)) identity_columns = Table( diff --git a/test/dialect/mssql/test_reflection.py b/test/dialect/mssql/test_reflection.py index d24ee4adb9a..125959cf9d8 100644 --- a/test/dialect/mssql/test_reflection.py +++ b/test/dialect/mssql/test_reflection.py @@ -847,7 +847,11 @@ def define_tables(cls, metadata): ), ), Column("id2", Integer, Identity()), - Column("id3", sqltypes.BigInteger, Identity()), + Column( + "id3", + sqltypes.BigInteger, + Identity(start=-9223372036854775808), + ), Column("id4", sqltypes.SmallInteger, Identity()), Column("id5", sqltypes.Numeric, Identity()), ] @@ -869,7 +873,10 @@ def test_reflect_identity(self, connection): eq_(type(col["identity"]["start"]), int) eq_(type(col["identity"]["increment"]), int) elif col["name"] == "id3": - eq_(col["identity"], {"start": 1, "increment": 1}) + eq_( + col["identity"], + {"start": -9223372036854775808, "increment": 1}, + ) eq_(type(col["identity"]["start"]), util.compat.long_type) eq_(type(col["identity"]["increment"]), util.compat.long_type) elif col["name"] == "id4": From f259b104dd2bb56b1e666ea5e69c576250e2419b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 29 Oct 2023 16:20:40 -0400 Subject: [PATCH 562/632] changelog edits Change-Id: I98b1541948407ee7e2d17b6032ba271df2163919 --- doc/build/changelog/changelog_14.rst | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index d54c9f8e068..1409f9db5c1 100644 --- a/doc/build/changelog/changelog_14.rst 
+++ b/doc/build/changelog/changelog_14.rst @@ -123,16 +123,17 @@ This document details individual issue-level changes made throughout :tickets: 9075 :versions: 2.0.0rc3 - Fixed bug / regression where using :func:`.bindparam()` with the same name - as a column in the :meth:`.Update.values` method of :class:`.Update`, as - well as the :meth:`.Insert.values` method of :class:`.Insert` in 2.0 only, - would in some cases silently fail to honor the SQL expression in which the - parameter were presented, replacing the expression with a new parameter of - the same name and discarding any other elements of the SQL expression, such - as SQL functions, etc. The specific case would be statements that were + Fixed bug / regression where using :func:`.bindparam()` with the same + name as a column in the :meth:`.Update.values` method of + :class:`.Update`, as well as the :meth:`_dml.Insert.values` method of + :class:`_dml.Insert` in 2.0 only, would in some cases silently fail to + honor the SQL expression in which the parameter were presented, + replacing the expression with a new parameter of the same name and + discarding any other elements of the SQL expression, such as SQL + functions, etc. The specific case would be statements that were constructed against ORM entities rather than plain :class:`.Table` instances, but would occur if the statement were invoked with a - :class:`.Session` or a :class:`.Connection`. + :class:`.Session` or a :class:`_engine.Connection`. :class:`.Update` part of the issue was present in both 2.0 and 1.4 and is backported to 1.4. 
From 6cd45888c0f7299125a137d9108c6eff4096561e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 29 Oct 2023 16:21:48 -0400 Subject: [PATCH 563/632] - 1.4.50 --- doc/build/changelog/changelog_14.rst | 76 ++++++++++++++++++- doc/build/changelog/unreleased_14/10142.rst | 9 --- doc/build/changelog/unreleased_14/10207.rst | 12 --- doc/build/changelog/unreleased_14/10213.rst | 9 --- doc/build/changelog/unreleased_14/10223.rst | 10 --- doc/build/changelog/unreleased_14/10492.rst | 13 ---- doc/build/changelog/unreleased_14/10504.rst | 8 -- .../changelog/unreleased_14/aiomysql.rst | 6 -- doc/build/conf.py | 4 +- 9 files changed, 77 insertions(+), 70 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/10142.rst delete mode 100644 doc/build/changelog/unreleased_14/10207.rst delete mode 100644 doc/build/changelog/unreleased_14/10213.rst delete mode 100644 doc/build/changelog/unreleased_14/10223.rst delete mode 100644 doc/build/changelog/unreleased_14/10492.rst delete mode 100644 doc/build/changelog/unreleased_14/10504.rst delete mode 100644 doc/build/changelog/unreleased_14/aiomysql.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 1409f9db5c1..8e3a04c8d06 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,81 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.50 - :include_notes_from: unreleased_14 + :released: October 29, 2023 + + .. change:: + :tags: bug, sql + :tickets: 10142 + :versions: 2.0.23 + + Fixed issue where using the same bound parameter more than once with + ``literal_execute=True`` in some combinations with other literal rendering + parameters would cause the wrong values to render due to an iteration + issue. + + .. change:: + :tags: mysql, usecase + :versions: 2.0.20 + + Updated aiomysql dialect since the dialect appears to be maintained again. 
+ Re-added to the ci testing using version 0.2.0. + + .. change:: + :tags: bug, orm + :tickets: 10223 + :versions: 2.0.20 + + Fixed fundamental issue which prevented some forms of ORM "annotations" + from taking place for subqueries which made use of :meth:`_sql.Select.join` + against a relationship target. These annotations are used whenever a + subquery is used in special situations such as within + :meth:`_orm.PropComparator.and_` and other ORM-specific scenarios. + + .. change:: + :tags: bug, sql + :tickets: 10213 + :versions: 2.0.20 + + Fixed issue where unpickling of a :class:`_schema.Column` or other + :class:`_sql.ColumnElement` would fail to restore the correct "comparator" + object, which is used to generate SQL expressions specific to the type + object. + + .. change:: + :tags: bug, mysql + :tickets: 10492 + :versions: 2.0.23 + + Repaired a new incompatibility in the MySQL "pre-ping" routine where the + ``False`` argument passed to ``connection.ping()``, which is intended to + disable an unwanted "automatic reconnect" feature, is being deprecated in + MySQL drivers and backends, and is producing warnings for some versions of + MySQL's native client drivers. It's removed for mysqlclient, whereas for + PyMySQL and drivers based on PyMySQL, the parameter will be deprecated and + removed at some point, so API introspection is used to future proof against + these various stages of removal. + + .. change:: + :tags: schema, bug + :tickets: 10207 + :versions: 2.0.21 + + Modified the rendering of the Oracle only :paramref:`.Identity.order` + parameter that's part of both :class:`.Sequence` and :class:`.Identity` to + only take place for the Oracle backend, and not other backends such as that + of PostgreSQL. A future release will rename the + :paramref:`.Identity.order`, :paramref:`.Sequence.order` and + :paramref:`.Identity.on_null` parameters to Oracle-specific names, + deprecating the old names, these parameters only apply to Oracle. + + .. 
change:: + :tags: bug, mssql, reflection + :tickets: 10504 + :versions: 2.0.23 + + Fixed issue where identity column reflection would fail + for a bigint column with a large identity start value + (more than 18 digits). .. changelog:: :version: 1.4.49 diff --git a/doc/build/changelog/unreleased_14/10142.rst b/doc/build/changelog/unreleased_14/10142.rst deleted file mode 100644 index 91643c69ecf..00000000000 --- a/doc/build/changelog/unreleased_14/10142.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 10142 - :versions: 2.0.23 - - Fixed issue where using the same bound parameter more than once with - ``literal_execute=True`` in some combinations with other literal rendering - parameters would cause the wrong values to render due to an iteration - issue. diff --git a/doc/build/changelog/unreleased_14/10207.rst b/doc/build/changelog/unreleased_14/10207.rst deleted file mode 100644 index aef31e6a42f..00000000000 --- a/doc/build/changelog/unreleased_14/10207.rst +++ /dev/null @@ -1,12 +0,0 @@ -.. change:: - :tags: schema, bug - :tickets: 10207 - :versions: 2.0.21 - - Modified the rendering of the Oracle only :paramref:`.Identity.order` - parameter that's part of both :class:`.Sequence` and :class:`.Identity` to - only take place for the Oracle backend, and not other backends such as that - of PostgreSQL. A future release will rename the - :paramref:`.Identity.order`, :paramref:`.Sequence.order` and - :paramref:`.Identity.on_null` parameters to Oracle-specific names, - deprecating the old names, these parameters only apply to Oracle. diff --git a/doc/build/changelog/unreleased_14/10213.rst b/doc/build/changelog/unreleased_14/10213.rst deleted file mode 100644 index 96c17b1946f..00000000000 --- a/doc/build/changelog/unreleased_14/10213.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. 
change:: - :tags: bug, sql - :tickets: 10213 - :versions: 2.0.20 - - Fixed issue where unpickling of a :class:`_schema.Column` or other - :class:`_sql.ColumnElement` would fail to restore the correct "comparator" - object, which is used to generate SQL expressions specific to the type - object. diff --git a/doc/build/changelog/unreleased_14/10223.rst b/doc/build/changelog/unreleased_14/10223.rst deleted file mode 100644 index 7c744240607..00000000000 --- a/doc/build/changelog/unreleased_14/10223.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 10223 - :versions: 2.0.20 - - Fixed fundamental issue which prevented some forms of ORM "annotations" - from taking place for subqueries which made use of :meth:`_sql.Select.join` - against a relationship target. These annotations are used whenever a - subquery is used in special situations such as within - :meth:`_orm.PropComparator.and_` and other ORM-specific scenarios. diff --git a/doc/build/changelog/unreleased_14/10492.rst b/doc/build/changelog/unreleased_14/10492.rst deleted file mode 100644 index 8ddf5738b69..00000000000 --- a/doc/build/changelog/unreleased_14/10492.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. change:: - :tags: bug, mysql - :tickets: 10492 - :versions: 2.0.23 - - Repaired a new incompatibility in the MySQL "pre-ping" routine where the - ``False`` argument passed to ``connection.ping()``, which is intended to - disable an unwanted "automatic reconnect" feature, is being deprecated in - MySQL drivers and backends, and is producing warnings for some versions of - MySQL's native client drivers. It's removed for mysqlclient, whereas for - PyMySQL and drivers based on PyMySQL, the parameter will be deprecated and - removed at some point, so API introspection is used to future proof against - these various stages of removal. 
diff --git a/doc/build/changelog/unreleased_14/10504.rst b/doc/build/changelog/unreleased_14/10504.rst deleted file mode 100644 index 7afc00f6673..00000000000 --- a/doc/build/changelog/unreleased_14/10504.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, mssql, reflection - :tickets: 10504 - :versions: 2.0.23 - - Fixed issue where identity column reflection would fail - for a bigint column with a large identity start value - (more than 18 digits). diff --git a/doc/build/changelog/unreleased_14/aiomysql.rst b/doc/build/changelog/unreleased_14/aiomysql.rst deleted file mode 100644 index ef6fc4c94f9..00000000000 --- a/doc/build/changelog/unreleased_14/aiomysql.rst +++ /dev/null @@ -1,6 +0,0 @@ -.. change:: - :tags: mysql, usecase - :versions: 2.0.20 - - Updated aiomysql dialect since the dialect appears to be maintained again. - Re-added to the ci testing using version 0.2.0. diff --git a/doc/build/conf.py b/doc/build/conf.py index 52d2753a820..48b76a0bfc9 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -219,9 +219,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. -release = "1.4.49" +release = "1.4.50" -release_date = "July 5, 2023" +release_date = "October 29, 2023" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From 2c53fdc4503a7e5108e9b95ed22af125df6dafed Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 29 Oct 2023 16:32:30 -0400 Subject: [PATCH 564/632] Version 1.4.51 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 8e3a04c8d06..00a709e4497 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. 
changelog:: + :version: 1.4.51 + :include_notes_from: unreleased_14 + .. changelog:: :version: 1.4.50 :released: October 29, 2023 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index 4c49a58148d..4f0c3666363 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.50" +__version__ = "1.4.51" def __go(lcls): From 9bafbd339e66177b14de71904a2c0e426ea0c711 Mon Sep 17 00:00:00 2001 From: Jack McIvor Date: Tue, 7 Nov 2023 17:55:18 +0000 Subject: [PATCH 565/632] Add trove classifier for 3.12 (#10599) (cherry picked from commit 8a583ad7bf65d5d9bf05568279723bc516de3ae3) --- setup.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.cfg b/setup.cfg index 2fa7b765dbb..6ba325e8f1b 100644 --- a/setup.cfg +++ b/setup.cfg @@ -27,6 +27,7 @@ classifiers = Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 + Programming Language :: Python :: 3.12 Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: Implementation :: PyPy Topic :: Database :: Front-Ends From 82cda05d2edae60e99bfc1df6becdd7fe90656d9 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 8 Nov 2023 15:20:24 -0500 Subject: [PATCH 566/632] remove . in sys.path this should not be needed and is causing problems in python 3.12 due to the presence of the "changelog" directory (cherry picked from commit 8faa17d4316772340295a677c54eccf647a221c9) --- doc/build/conf.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/build/conf.py b/doc/build/conf.py index 48b76a0bfc9..a9ee46c4d12 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -20,7 +20,9 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. 
sys.path.insert(0, os.path.abspath("../../lib")) sys.path.insert(0, os.path.abspath("../..")) # examples -sys.path.insert(0, os.path.abspath(".")) + +# was never needed, does not work as of python 3.12 due to conflicts +#sys.path.insert(0, os.path.abspath(".")) # -- General configuration -------------------------------------------------- From f04d221b62c92ce4cc9e7ee374c3ca566fd35f94 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Wed, 8 Nov 2023 21:49:06 +0100 Subject: [PATCH 567/632] Fix lint error Change-Id: Ifb53e125fc9fd759938908710b2474656dbf1ef9 (cherry picked from commit 1da3f3455dc97ad095d7abd10add7f12efe6c1c7) --- doc/build/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/conf.py b/doc/build/conf.py index a9ee46c4d12..0066ef7aad8 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -22,7 +22,7 @@ sys.path.insert(0, os.path.abspath("../..")) # examples # was never needed, does not work as of python 3.12 due to conflicts -#sys.path.insert(0, os.path.abspath(".")) +# sys.path.insert(0, os.path.abspath(".")) # -- General configuration -------------------------------------------------- From 463952866fa0dfeaa8d7acaec66fbccb0535c8dc Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Sat, 4 Nov 2023 21:32:16 +0100 Subject: [PATCH 568/632] Properly document ARRAY.contains. 
#10587 Change-Id: I86e4f01f5d897b257246fe5f970b78e3444aca3e (cherry picked from commit 1bb9c4b94483a25057bad3d78cf9956e8f292330) --- doc/build/changelog/changelog_13.rst | 2 +- lib/sqlalchemy/sql/sqltypes.py | 7 +++++++ setup.cfg | 2 +- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/doc/build/changelog/changelog_13.rst b/doc/build/changelog/changelog_13.rst index 629387ff97b..1e14314d089 100644 --- a/doc/build/changelog/changelog_13.rst +++ b/doc/build/changelog/changelog_13.rst @@ -3336,7 +3336,7 @@ :tags: change, orm :tickets: 4412 - Added a new function :func:`.close_all_sessions` which takes + Added a new function :func:`_orm.close_all_sessions` which takes over the task of the :meth:`.Session.close_all` method, which is now deprecated as this is confusing as a classmethod. Pull request courtesy Augustin Trancart. diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index eed63e5070d..36fcabea5a4 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ -2846,6 +2846,13 @@ def _setup_getitem(self, index): return operators.getitem, index, return_type def contains(self, *arg, **kw): + """``ARRAY.contains()`` not implemented for the base ARRAY type. + Use the dialect-specific ARRAY type. + + .. seealso:: + + :class:`_postgresql.ARRAY` - PostgreSQL specific version. 
+ """ raise NotImplementedError( "ARRAY.contains() not implemented for the base " "ARRAY type; please use the dialect-specific ARRAY type" diff --git a/setup.cfg b/setup.cfg index 6ba325e8f1b..307087dc0be 100644 --- a/setup.cfg +++ b/setup.cfg @@ -108,7 +108,7 @@ enable-extensions = G # E203 is due to https://github.com/PyCQA/pycodestyle/issues/373 ignore = - A003, + A003, A004 D, E203,E305,E711,E712,E721,E722,E741, N801,N802,N806, From 2ce60c25e37c3919fd63cb74064812c3c2ed783a Mon Sep 17 00:00:00 2001 From: Aleksandr Kiliushin Date: Sun, 12 Nov 2023 13:32:05 +0400 Subject: [PATCH 569/632] Fix a typo (#10620) (cherry picked from commit fc6c2b19fd7f5cb89e0c405f5aa3b3360e4c4a93) --- doc/build/intro.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/build/intro.rst b/doc/build/intro.rst index 02a546a86e1..46255e79f9d 100644 --- a/doc/build/intro.rst +++ b/doc/build/intro.rst @@ -42,7 +42,7 @@ augmented by ORM-specific automations and object-centric querying capabilities. Whereas working with Core and the SQL Expression language presents a schema-centric view of the database, along with a programming paradigm that is oriented around immutability, the ORM builds on top of this a domain-centric -view of the database with a programming paradigm that is more explcitly +view of the database with a programming paradigm that is more explicitly object-oriented and reliant upon mutability. Since a relational database is itself a mutable service, the difference is that Core/SQL Expression language is command oriented whereas the ORM is state oriented. From 3776694bd7d3d3877e2e6f6ad259e490b2f672a9 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Wed, 22 Nov 2023 22:04:03 +0100 Subject: [PATCH 570/632] Fix pre-ping regression in old PyMySQL versions. Fixed regression introduced by the fix in ticket :ticket:`10492` when using pool pre-ping with PyMySQL version older than 1.0. 
Fixes: #10650 Change-Id: Ic0744c8b6f91cc39868e31c3bfddb8df20c7dfbb --- doc/build/changelog/unreleased_14/10650.rst | 7 +++++++ lib/sqlalchemy/dialects/mysql/pymysql.py | 4 +++- 2 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/10650.rst diff --git a/doc/build/changelog/unreleased_14/10650.rst b/doc/build/changelog/unreleased_14/10650.rst new file mode 100644 index 00000000000..dce6b4c75a5 --- /dev/null +++ b/doc/build/changelog/unreleased_14/10650.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: bug, mysql + :tickets: 10650 + :versions: 2.0.24 + + Fixed regression introduced by the fix in ticket :ticket:`10492` when using + pool pre-ping with PyMySQL version older than 1.0. diff --git a/lib/sqlalchemy/dialects/mysql/pymysql.py b/lib/sqlalchemy/dialects/mysql/pymysql.py index 26a03fa4c95..a08418096be 100644 --- a/lib/sqlalchemy/dialects/mysql/pymysql.py +++ b/lib/sqlalchemy/dialects/mysql/pymysql.py @@ -91,7 +91,9 @@ def _send_false_to_ping(self): """ # noqa: E501 try: - Connection = __import__("pymysql.connections").Connection + Connection = __import__( + "pymysql.connections" + ).connections.Connection except (ImportError, AttributeError): return True else: From 4a5e41a2d1d550f341394d06fe392c879fa8faff Mon Sep 17 00:00:00 2001 From: Yilei Yang Date: Thu, 21 Dec 2023 02:47:03 -0500 Subject: [PATCH 571/632] Use a copy of `self.contents` in this list comprehension. Improved a fix first implemented for :ticket:`3208` released in version 0.9.8, where the registry of classes used internally by declarative could be subject to a race condition in the case where individual mapped classes are being garbage collected at the same time while new mapped classes are being constructed, as can happen in some test suite configurations or dynamic class creation environments. In addition to the weakref check already added, the list of items being iterated is also copied first to avoid "list changed while iterating" errors. 
Pull request courtesy Yilei Yang. Fixes: #10782 Closes: #10783 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/10783 Pull-request-sha: 354e97b640430120d0c193a4efe487f293d4768b Change-Id: I04ccc92472bf1004dad0fb785e16b180f58f101d (cherry picked from commit 0fe5d3ca51884b85b4059ed05b53f02172325e70) --- doc/build/changelog/unreleased_14/10782.rst | 15 +++++++++++++++ lib/sqlalchemy/orm/clsregistry.py | 4 ++-- 2 files changed, 17 insertions(+), 2 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/10782.rst diff --git a/doc/build/changelog/unreleased_14/10782.rst b/doc/build/changelog/unreleased_14/10782.rst new file mode 100644 index 00000000000..d7b219a3652 --- /dev/null +++ b/doc/build/changelog/unreleased_14/10782.rst @@ -0,0 +1,15 @@ +.. change:: + :tags: bug, orm + :tickets: 10782 + :versions: 2.0.24, 1.4.51 + + Improved a fix first implemented for :ticket:`3208` released in version + 0.9.8, where the registry of classes used internally by declarative could + be subject to a race condition in the case where individual mapped classes + are being garbage collected at the same time while new mapped classes are + being constructed, as can happen in some test suite configurations or + dynamic class creation environments. In addition to the weakref check + already added, the list of items being iterated is also copied first to + avoid "list changed while iterating" errors. Pull request courtesy Yilei + Yang. 
+ diff --git a/lib/sqlalchemy/orm/clsregistry.py b/lib/sqlalchemy/orm/clsregistry.py index fda5d111028..36a42f3e35b 100644 --- a/lib/sqlalchemy/orm/clsregistry.py +++ b/lib/sqlalchemy/orm/clsregistry.py @@ -164,11 +164,11 @@ def _remove_item(self, ref): def add_item(self, item): # protect against class registration race condition against # asynchronous garbage collection calling _remove_item, - # [ticket:3208] + # [ticket:3208] and [ticket:10782] modules = set( [ cls.__module__ - for cls in [ref() for ref in self.contents] + for cls in [ref() for ref in list(self.contents)] if cls is not None ] ) From bf6d5afd7406a28058951a06010be40079f3bdef Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 1 Jan 2024 17:11:11 -0500 Subject: [PATCH 572/632] 2024 setup / backport normalize files, tool setup also bumps flake8 in pre-commit to match that of 2.0 Change-Id: I2853d99bbc19c94227e2b88d450873197013bdfb --- .pre-commit-config.yaml | 10 +- lib/sqlalchemy/__init__.py | 4 +- lib/sqlalchemy/connectors/__init__.py | 2 +- lib/sqlalchemy/connectors/mxodbc.py | 2 +- lib/sqlalchemy/connectors/pyodbc.py | 2 +- lib/sqlalchemy/databases/__init__.py | 2 +- lib/sqlalchemy/dialects/__init__.py | 2 +- lib/sqlalchemy/dialects/firebird/__init__.py | 4 +- lib/sqlalchemy/dialects/firebird/base.py | 4 +- lib/sqlalchemy/dialects/firebird/fdb.py | 4 +- .../dialects/firebird/kinterbasdb.py | 4 +- lib/sqlalchemy/dialects/mssql/__init__.py | 4 +- lib/sqlalchemy/dialects/mssql/base.py | 4 +- .../dialects/mssql/information_schema.py | 4 +- lib/sqlalchemy/dialects/mssql/json.py | 6 + lib/sqlalchemy/dialects/mssql/mxodbc.py | 4 +- lib/sqlalchemy/dialects/mssql/provision.py | 6 + lib/sqlalchemy/dialects/mssql/pymssql.py | 4 +- lib/sqlalchemy/dialects/mssql/pyodbc.py | 4 +- lib/sqlalchemy/dialects/mysql/__init__.py | 4 +- lib/sqlalchemy/dialects/mysql/aiomysql.py | 4 +- lib/sqlalchemy/dialects/mysql/asyncmy.py | 4 +- lib/sqlalchemy/dialects/mysql/base.py | 4 +- lib/sqlalchemy/dialects/mysql/cymysql.py | 
4 +- lib/sqlalchemy/dialects/mysql/dml.py | 6 + lib/sqlalchemy/dialects/mysql/enumerated.py | 4 +- lib/sqlalchemy/dialects/mysql/expression.py | 6 + lib/sqlalchemy/dialects/mysql/json.py | 4 +- lib/sqlalchemy/dialects/mysql/mariadb.py | 6 + .../dialects/mysql/mariadbconnector.py | 4 +- .../dialects/mysql/mysqlconnector.py | 4 +- lib/sqlalchemy/dialects/mysql/mysqldb.py | 4 +- lib/sqlalchemy/dialects/mysql/oursql.py | 4 +- lib/sqlalchemy/dialects/mysql/provision.py | 6 + lib/sqlalchemy/dialects/mysql/pymysql.py | 4 +- lib/sqlalchemy/dialects/mysql/pyodbc.py | 4 +- lib/sqlalchemy/dialects/mysql/reflection.py | 4 +- .../dialects/mysql/reserved_words.py | 4 +- lib/sqlalchemy/dialects/mysql/types.py | 4 +- lib/sqlalchemy/dialects/oracle/__init__.py | 4 +- lib/sqlalchemy/dialects/oracle/base.py | 4 +- lib/sqlalchemy/dialects/oracle/cx_oracle.py | 3 +- lib/sqlalchemy/dialects/oracle/provision.py | 6 + .../dialects/postgresql/__init__.py | 4 +- lib/sqlalchemy/dialects/postgresql/array.py | 4 +- lib/sqlalchemy/dialects/postgresql/asyncpg.py | 4 +- lib/sqlalchemy/dialects/postgresql/base.py | 4 +- lib/sqlalchemy/dialects/postgresql/dml.py | 4 +- lib/sqlalchemy/dialects/postgresql/ext.py | 4 +- lib/sqlalchemy/dialects/postgresql/hstore.py | 4 +- lib/sqlalchemy/dialects/postgresql/json.py | 4 +- lib/sqlalchemy/dialects/postgresql/pg8000.py | 4 +- .../dialects/postgresql/provision.py | 6 + .../dialects/postgresql/psycopg2.py | 4 +- .../dialects/postgresql/psycopg2cffi.py | 4 +- .../dialects/postgresql/pygresql.py | 4 +- .../dialects/postgresql/pypostgresql.py | 4 +- lib/sqlalchemy/dialects/postgresql/ranges.py | 3 +- lib/sqlalchemy/dialects/sqlite/__init__.py | 4 +- lib/sqlalchemy/dialects/sqlite/aiosqlite.py | 4 +- lib/sqlalchemy/dialects/sqlite/base.py | 4 +- lib/sqlalchemy/dialects/sqlite/dml.py | 3 +- lib/sqlalchemy/dialects/sqlite/json.py | 6 + lib/sqlalchemy/dialects/sqlite/provision.py | 6 + lib/sqlalchemy/dialects/sqlite/pysqlcipher.py | 4 +- 
lib/sqlalchemy/dialects/sqlite/pysqlite.py | 4 +- lib/sqlalchemy/dialects/sybase/__init__.py | 4 +- lib/sqlalchemy/dialects/sybase/base.py | 4 +- lib/sqlalchemy/dialects/sybase/mxodbc.py | 4 +- lib/sqlalchemy/dialects/sybase/pyodbc.py | 4 +- lib/sqlalchemy/dialects/sybase/pysybase.py | 4 +- lib/sqlalchemy/engine/__init__.py | 2 +- lib/sqlalchemy/engine/base.py | 2 +- lib/sqlalchemy/engine/characteristics.py | 6 + lib/sqlalchemy/engine/create.py | 2 +- lib/sqlalchemy/engine/cursor.py | 2 +- lib/sqlalchemy/engine/default.py | 2 +- lib/sqlalchemy/engine/events.py | 4 +- lib/sqlalchemy/engine/interfaces.py | 2 +- lib/sqlalchemy/engine/mock.py | 2 +- lib/sqlalchemy/engine/reflection.py | 2 +- lib/sqlalchemy/engine/result.py | 2 +- lib/sqlalchemy/engine/row.py | 2 +- lib/sqlalchemy/engine/strategies.py | 2 +- lib/sqlalchemy/engine/url.py | 2 +- lib/sqlalchemy/engine/util.py | 2 +- lib/sqlalchemy/event/__init__.py | 2 +- lib/sqlalchemy/event/api.py | 2 +- lib/sqlalchemy/event/attr.py | 2 +- lib/sqlalchemy/event/base.py | 2 +- lib/sqlalchemy/event/legacy.py | 2 +- lib/sqlalchemy/event/registry.py | 2 +- lib/sqlalchemy/events.py | 4 +- lib/sqlalchemy/exc.py | 4 +- lib/sqlalchemy/ext/__init__.py | 2 +- lib/sqlalchemy/ext/associationproxy.py | 2 +- lib/sqlalchemy/ext/asyncio/__init__.py | 2 +- lib/sqlalchemy/ext/asyncio/base.py | 2 +- lib/sqlalchemy/ext/asyncio/engine.py | 2 +- lib/sqlalchemy/ext/asyncio/events.py | 2 +- lib/sqlalchemy/ext/asyncio/exc.py | 2 +- lib/sqlalchemy/ext/asyncio/result.py | 2 +- lib/sqlalchemy/ext/asyncio/scoping.py | 2 +- lib/sqlalchemy/ext/asyncio/session.py | 2 +- lib/sqlalchemy/ext/automap.py | 2 +- lib/sqlalchemy/ext/baked.py | 4 +- lib/sqlalchemy/ext/compiler.py | 2 +- lib/sqlalchemy/ext/declarative/__init__.py | 2 +- lib/sqlalchemy/ext/declarative/extensions.py | 2 +- lib/sqlalchemy/ext/horizontal_shard.py | 2 +- lib/sqlalchemy/ext/hybrid.py | 2 +- lib/sqlalchemy/ext/indexable.py | 4 +- lib/sqlalchemy/ext/instrumentation.py | 6 + 
lib/sqlalchemy/ext/mutable.py | 2 +- lib/sqlalchemy/ext/mypy/__init__.py | 6 + lib/sqlalchemy/ext/mypy/apply.py | 6 + lib/sqlalchemy/ext/mypy/decl_class.py | 6 + lib/sqlalchemy/ext/mypy/infer.py | 6 + lib/sqlalchemy/ext/mypy/names.py | 6 + lib/sqlalchemy/ext/mypy/plugin.py | 6 + lib/sqlalchemy/ext/mypy/util.py | 6 + lib/sqlalchemy/ext/orderinglist.py | 2 +- lib/sqlalchemy/ext/serializer.py | 2 +- lib/sqlalchemy/future/__init__.py | 4 +- lib/sqlalchemy/future/engine.py | 6 + lib/sqlalchemy/future/orm/__init__.py | 4 +- lib/sqlalchemy/inspection.py | 4 +- lib/sqlalchemy/log.py | 4 +- lib/sqlalchemy/orm/__init__.py | 2 +- lib/sqlalchemy/orm/attributes.py | 2 +- lib/sqlalchemy/orm/base.py | 2 +- lib/sqlalchemy/orm/clsregistry.py | 4 +- lib/sqlalchemy/orm/collections.py | 2 +- lib/sqlalchemy/orm/context.py | 2 +- lib/sqlalchemy/orm/decl_api.py | 4 +- lib/sqlalchemy/orm/decl_base.py | 4 +- lib/sqlalchemy/orm/dependency.py | 2 +- lib/sqlalchemy/orm/descriptor_props.py | 2 +- lib/sqlalchemy/orm/dynamic.py | 2 +- lib/sqlalchemy/orm/evaluator.py | 2 +- lib/sqlalchemy/orm/events.py | 2 +- lib/sqlalchemy/orm/exc.py | 2 +- lib/sqlalchemy/orm/identity.py | 2 +- lib/sqlalchemy/orm/instrumentation.py | 2 +- lib/sqlalchemy/orm/interfaces.py | 2 +- lib/sqlalchemy/orm/loading.py | 2 +- lib/sqlalchemy/orm/mapper.py | 2 +- lib/sqlalchemy/orm/path_registry.py | 2 +- lib/sqlalchemy/orm/persistence.py | 2 +- lib/sqlalchemy/orm/properties.py | 2 +- lib/sqlalchemy/orm/query.py | 2 +- lib/sqlalchemy/orm/relationships.py | 2 +- lib/sqlalchemy/orm/scoping.py | 2 +- lib/sqlalchemy/orm/session.py | 2 +- lib/sqlalchemy/orm/state.py | 2 +- lib/sqlalchemy/orm/strategies.py | 2 +- lib/sqlalchemy/orm/strategy_options.py | 3 +- lib/sqlalchemy/orm/sync.py | 2 +- lib/sqlalchemy/orm/unitofwork.py | 2 +- lib/sqlalchemy/orm/util.py | 2 +- lib/sqlalchemy/pool/__init__.py | 4 +- lib/sqlalchemy/pool/base.py | 4 +- lib/sqlalchemy/pool/dbapi_proxy.py | 4 +- lib/sqlalchemy/pool/events.py | 4 +- 
lib/sqlalchemy/pool/impl.py | 4 +- lib/sqlalchemy/processors.py | 4 +- lib/sqlalchemy/schema.py | 2 +- lib/sqlalchemy/sql/__init__.py | 2 +- lib/sqlalchemy/sql/annotation.py | 2 +- lib/sqlalchemy/sql/base.py | 2 +- lib/sqlalchemy/sql/coercions.py | 2 +- lib/sqlalchemy/sql/compiler.py | 2 +- lib/sqlalchemy/sql/crud.py | 2 +- lib/sqlalchemy/sql/ddl.py | 2 +- lib/sqlalchemy/sql/default_comparator.py | 2 +- lib/sqlalchemy/sql/dml.py | 2 +- lib/sqlalchemy/sql/elements.py | 2 +- lib/sqlalchemy/sql/events.py | 4 +- lib/sqlalchemy/sql/expression.py | 2 +- lib/sqlalchemy/sql/functions.py | 2 +- lib/sqlalchemy/sql/lambdas.py | 2 +- lib/sqlalchemy/sql/naming.py | 4 +- lib/sqlalchemy/sql/operators.py | 2 +- lib/sqlalchemy/sql/roles.py | 2 +- lib/sqlalchemy/sql/schema.py | 2 +- lib/sqlalchemy/sql/selectable.py | 2 +- lib/sqlalchemy/sql/sqltypes.py | 2 +- lib/sqlalchemy/sql/traversals.py | 6 + lib/sqlalchemy/sql/type_api.py | 4 +- lib/sqlalchemy/sql/util.py | 2 +- lib/sqlalchemy/sql/visitors.py | 2 +- lib/sqlalchemy/testing/__init__.py | 2 +- lib/sqlalchemy/testing/assertions.py | 2 +- lib/sqlalchemy/testing/assertsql.py | 2 +- lib/sqlalchemy/testing/asyncio.py | 2 +- lib/sqlalchemy/testing/config.py | 2 +- lib/sqlalchemy/testing/engines.py | 2 +- lib/sqlalchemy/testing/entities.py | 2 +- lib/sqlalchemy/testing/exclusions.py | 2 +- lib/sqlalchemy/testing/fixtures.py | 2 +- lib/sqlalchemy/testing/mock.py | 2 +- lib/sqlalchemy/testing/pickleable.py | 2 +- lib/sqlalchemy/testing/plugin/__init__.py | 6 + lib/sqlalchemy/testing/plugin/bootstrap.py | 6 + lib/sqlalchemy/testing/plugin/plugin_base.py | 4 +- lib/sqlalchemy/testing/plugin/pytestplugin.py | 6 + .../testing/plugin/reinvent_fixtures_py2k.py | 6 + lib/sqlalchemy/testing/profiling.py | 2 +- lib/sqlalchemy/testing/provision.py | 6 + lib/sqlalchemy/testing/requirements.py | 2 +- lib/sqlalchemy/testing/schema.py | 2 +- lib/sqlalchemy/testing/suite/__init__.py | 6 + lib/sqlalchemy/testing/suite/test_cte.py | 6 + 
lib/sqlalchemy/testing/suite/test_ddl.py | 6 + .../testing/suite/test_deprecations.py | 6 + lib/sqlalchemy/testing/suite/test_dialect.py | 6 + lib/sqlalchemy/testing/suite/test_insert.py | 6 + .../testing/suite/test_reflection.py | 6 + lib/sqlalchemy/testing/suite/test_results.py | 6 + lib/sqlalchemy/testing/suite/test_rowcount.py | 6 + lib/sqlalchemy/testing/suite/test_select.py | 6 + lib/sqlalchemy/testing/suite/test_sequence.py | 6 + lib/sqlalchemy/testing/suite/test_types.py | 6 + .../testing/suite/test_unicode_ddl.py | 6 + .../testing/suite/test_update_delete.py | 6 + lib/sqlalchemy/testing/util.py | 2 +- lib/sqlalchemy/testing/warnings.py | 2 +- lib/sqlalchemy/types.py | 2 +- lib/sqlalchemy/util/__init__.py | 2 +- lib/sqlalchemy/util/_collections.py | 2 +- lib/sqlalchemy/util/_compat_py3k.py | 2 +- lib/sqlalchemy/util/_concurrency_py3k.py | 2 +- lib/sqlalchemy/util/_preloaded.py | 2 +- lib/sqlalchemy/util/compat.py | 2 +- lib/sqlalchemy/util/concurrency.py | 2 +- lib/sqlalchemy/util/deprecations.py | 2 +- lib/sqlalchemy/util/langhelpers.py | 2 +- lib/sqlalchemy/util/queue.py | 2 +- lib/sqlalchemy/util/tool_support.py | 201 ++++++++++++++++++ lib/sqlalchemy/util/topological.py | 2 +- tools/normalize_file_headers.py | 69 ++++++ 241 files changed, 793 insertions(+), 275 deletions(-) create mode 100644 lib/sqlalchemy/util/tool_support.py create mode 100644 tools/normalize_file_headers.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a648d37d2d0..8da99d2d387 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -14,19 +14,23 @@ repos: - id: zimports - repo: https://github.com/pycqa/flake8 - rev: 5.0.0 + rev: 6.1.0 hooks: - id: flake8 additional_dependencies: - flake8-import-order + - flake8-import-single==0.1.5 - flake8-builtins - - flake8-docstrings>=1.3.1 + - flake8-future-annotations>=0.0.5 + - flake8-docstrings>=1.6.0 + - flake8-unused-arguments - flake8-rst-docstrings - # flake8-rst-docstrings depdendency, leaving it 
here + # flake8-rst-docstrings dependency, leaving it here # in case it requires a version pin - pydocstyle - pygments + - repo: local hooks: - id: black-docs diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index 4f0c3666363..063ac840eb2 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -1,5 +1,5 @@ -# sqlalchemy/__init__.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# __init__.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/connectors/__init__.py b/lib/sqlalchemy/connectors/__init__.py index d043c44ce65..518d2345c31 100644 --- a/lib/sqlalchemy/connectors/__init__.py +++ b/lib/sqlalchemy/connectors/__init__.py @@ -1,5 +1,5 @@ # connectors/__init__.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/connectors/mxodbc.py b/lib/sqlalchemy/connectors/mxodbc.py index 8ed18dfedbf..df119229e6e 100644 --- a/lib/sqlalchemy/connectors/mxodbc.py +++ b/lib/sqlalchemy/connectors/mxodbc.py @@ -1,5 +1,5 @@ # connectors/mxodbc.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/connectors/pyodbc.py b/lib/sqlalchemy/connectors/pyodbc.py index a8b9cdfae04..d0c27231ac8 100644 --- a/lib/sqlalchemy/connectors/pyodbc.py +++ b/lib/sqlalchemy/connectors/pyodbc.py @@ -1,5 +1,5 @@ # connectors/pyodbc.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/databases/__init__.py 
b/lib/sqlalchemy/databases/__init__.py index 6af6d0448a2..09d7ef2436a 100644 --- a/lib/sqlalchemy/databases/__init__.py +++ b/lib/sqlalchemy/databases/__init__.py @@ -1,5 +1,5 @@ # databases/__init__.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/__init__.py b/lib/sqlalchemy/dialects/__init__.py index 0671b4d0fb9..78bf4d14b2f 100644 --- a/lib/sqlalchemy/dialects/__init__.py +++ b/lib/sqlalchemy/dialects/__init__.py @@ -1,5 +1,5 @@ # dialects/__init__.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/firebird/__init__.py b/lib/sqlalchemy/dialects/firebird/__init__.py index c2e63f7f816..95fa165ea4d 100644 --- a/lib/sqlalchemy/dialects/firebird/__init__.py +++ b/lib/sqlalchemy/dialects/firebird/__init__.py @@ -1,5 +1,5 @@ -# firebird/__init__.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/firebird/__init__.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/firebird/base.py b/lib/sqlalchemy/dialects/firebird/base.py index 5fd24ee272a..36129b88de6 100644 --- a/lib/sqlalchemy/dialects/firebird/base.py +++ b/lib/sqlalchemy/dialects/firebird/base.py @@ -1,5 +1,5 @@ -# firebird/base.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/firebird/base.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/firebird/fdb.py b/lib/sqlalchemy/dialects/firebird/fdb.py index 3a093b38d68..a23c4fde09d 100644 --- 
a/lib/sqlalchemy/dialects/firebird/fdb.py +++ b/lib/sqlalchemy/dialects/firebird/fdb.py @@ -1,5 +1,5 @@ -# firebird/fdb.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/firebird/fdb.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/firebird/kinterbasdb.py b/lib/sqlalchemy/dialects/firebird/kinterbasdb.py index d2a110ebd01..5a572cec66c 100644 --- a/lib/sqlalchemy/dialects/firebird/kinterbasdb.py +++ b/lib/sqlalchemy/dialects/firebird/kinterbasdb.py @@ -1,5 +1,5 @@ -# firebird/kinterbasdb.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/firebird/kinterbasdb.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/__init__.py b/lib/sqlalchemy/dialects/mssql/__init__.py index 22c38eafccc..c13f066cb84 100644 --- a/lib/sqlalchemy/dialects/mssql/__init__.py +++ b/lib/sqlalchemy/dialects/mssql/__init__.py @@ -1,5 +1,5 @@ -# mssql/__init__.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/mssql/__init__.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index db741d84aaf..326d9f54fcc 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -1,5 +1,5 @@ -# mssql/base.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/mssql/base.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/information_schema.py b/lib/sqlalchemy/dialects/mssql/information_schema.py index 998757c1708..7f538d6b9a0 
100644 --- a/lib/sqlalchemy/dialects/mssql/information_schema.py +++ b/lib/sqlalchemy/dialects/mssql/information_schema.py @@ -1,5 +1,5 @@ -# mssql/information_schema.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/mssql/information_schema.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/json.py b/lib/sqlalchemy/dialects/mssql/json.py index d5157312c72..c857ea677ad 100644 --- a/lib/sqlalchemy/dialects/mssql/json.py +++ b/lib/sqlalchemy/dialects/mssql/json.py @@ -1,3 +1,9 @@ +# dialects/mssql/json.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from ... import types as sqltypes # technically, all the dialect-specific datatypes that don't have any special diff --git a/lib/sqlalchemy/dialects/mssql/mxodbc.py b/lib/sqlalchemy/dialects/mssql/mxodbc.py index 4aceb56c6f7..f19c9f525f9 100644 --- a/lib/sqlalchemy/dialects/mssql/mxodbc.py +++ b/lib/sqlalchemy/dialects/mssql/mxodbc.py @@ -1,5 +1,5 @@ -# mssql/mxodbc.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/mssql/mxodbc.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/provision.py b/lib/sqlalchemy/dialects/mssql/provision.py index 56f3305a704..dd001da2467 100644 --- a/lib/sqlalchemy/dialects/mssql/provision.py +++ b/lib/sqlalchemy/dialects/mssql/provision.py @@ -1,3 +1,9 @@ +# dialects/mssql/provision.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from sqlalchemy import inspect from sqlalchemy 
import Integer from ... import create_engine diff --git a/lib/sqlalchemy/dialects/mssql/pymssql.py b/lib/sqlalchemy/dialects/mssql/pymssql.py index ddb8f1fbcbb..052521fc3fe 100644 --- a/lib/sqlalchemy/dialects/mssql/pymssql.py +++ b/lib/sqlalchemy/dialects/mssql/pymssql.py @@ -1,5 +1,5 @@ -# mssql/pymssql.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/mssql/pymssql.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/pyodbc.py b/lib/sqlalchemy/dialects/mssql/pyodbc.py index 104774d428b..ec274090da2 100644 --- a/lib/sqlalchemy/dialects/mssql/pyodbc.py +++ b/lib/sqlalchemy/dialects/mssql/pyodbc.py @@ -1,5 +1,5 @@ -# mssql/pyodbc.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/mssql/pyodbc.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/__init__.py b/lib/sqlalchemy/dialects/mysql/__init__.py index b58e7bee989..24bead67ec0 100644 --- a/lib/sqlalchemy/dialects/mysql/__init__.py +++ b/lib/sqlalchemy/dialects/mysql/__init__.py @@ -1,5 +1,5 @@ -# mysql/__init__.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/mysql/__init__.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/aiomysql.py b/lib/sqlalchemy/dialects/mysql/aiomysql.py index c5a74b82844..18dad8e53ce 100644 --- a/lib/sqlalchemy/dialects/mysql/aiomysql.py +++ b/lib/sqlalchemy/dialects/mysql/aiomysql.py @@ -1,5 +1,5 @@ -# mysql/aiomysql.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/asyncmy.py 
b/lib/sqlalchemy/dialects/mysql/asyncmy.py index fc0ebe2798c..2562795e8f7 100644 --- a/lib/sqlalchemy/dialects/mysql/asyncmy.py +++ b/lib/sqlalchemy/dialects/mysql/asyncmy.py @@ -1,5 +1,5 @@ -# mysql/asyncmy.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index 1f08495ced3..8684c69d298 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -1,5 +1,5 @@ -# mysql/base.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/mysql/base.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/cymysql.py b/lib/sqlalchemy/dialects/mysql/cymysql.py index 889fe46105b..a1959b02385 100644 --- a/lib/sqlalchemy/dialects/mysql/cymysql.py +++ b/lib/sqlalchemy/dialects/mysql/cymysql.py @@ -1,5 +1,5 @@ -# mysql/cymysql.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/mysql/cymysql.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/dml.py b/lib/sqlalchemy/dialects/mysql/dml.py index 0c8791a0d7b..4c8b8eac1c3 100644 --- a/lib/sqlalchemy/dialects/mysql/dml.py +++ b/lib/sqlalchemy/dialects/mysql/dml.py @@ -1,3 +1,9 @@ +# dialects/mysql/dml.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from ... import exc from ... 
import util from ...sql.base import _exclusive_against diff --git a/lib/sqlalchemy/dialects/mysql/enumerated.py b/lib/sqlalchemy/dialects/mysql/enumerated.py index 3f5ae77f588..a6b698781e4 100644 --- a/lib/sqlalchemy/dialects/mysql/enumerated.py +++ b/lib/sqlalchemy/dialects/mysql/enumerated.py @@ -1,5 +1,5 @@ -# mysql/enumerated.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/mysql/enumerated.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/expression.py b/lib/sqlalchemy/dialects/mysql/expression.py index 7a66e9b1428..774a8cbaef3 100644 --- a/lib/sqlalchemy/dialects/mysql/expression.py +++ b/lib/sqlalchemy/dialects/mysql/expression.py @@ -1,3 +1,9 @@ +# dialects/mysql/expression.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from ... import exc from ... 
import util from ...sql import coercions diff --git a/lib/sqlalchemy/dialects/mysql/json.py b/lib/sqlalchemy/dialects/mysql/json.py index bc9b62cc76e..d0cde0bbc15 100644 --- a/lib/sqlalchemy/dialects/mysql/json.py +++ b/lib/sqlalchemy/dialects/mysql/json.py @@ -1,5 +1,5 @@ -# mysql/json.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/mysql/json.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/mariadb.py b/lib/sqlalchemy/dialects/mysql/mariadb.py index 568c3f0cf58..e64f554d2c6 100644 --- a/lib/sqlalchemy/dialects/mysql/mariadb.py +++ b/lib/sqlalchemy/dialects/mysql/mariadb.py @@ -1,3 +1,9 @@ +# dialects/mysql/mariadb.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from .base import MariaDBIdentifierPreparer from .base import MySQLDialect diff --git a/lib/sqlalchemy/dialects/mysql/mariadbconnector.py b/lib/sqlalchemy/dialects/mysql/mariadbconnector.py index 01c83b06a90..8e1fb39770e 100644 --- a/lib/sqlalchemy/dialects/mysql/mariadbconnector.py +++ b/lib/sqlalchemy/dialects/mysql/mariadbconnector.py @@ -1,5 +1,5 @@ -# mysql/mariadbconnector.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/mysql/mariadbconnector.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/mysqlconnector.py b/lib/sqlalchemy/dialects/mysql/mysqlconnector.py index bb173b335b1..59b96b045ae 100644 --- a/lib/sqlalchemy/dialects/mysql/mysqlconnector.py +++ b/lib/sqlalchemy/dialects/mysql/mysqlconnector.py @@ -1,5 +1,5 @@ -# mysql/mysqlconnector.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# 
dialects/mysql/mysqlconnector.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/mysqldb.py b/lib/sqlalchemy/dialects/mysql/mysqldb.py index ad442862385..fc639647d9a 100644 --- a/lib/sqlalchemy/dialects/mysql/mysqldb.py +++ b/lib/sqlalchemy/dialects/mysql/mysqldb.py @@ -1,5 +1,5 @@ -# mysql/mysqldb.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/mysql/mysqldb.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/oursql.py b/lib/sqlalchemy/dialects/mysql/oursql.py index 603ee18e9ba..ec34003c2b8 100644 --- a/lib/sqlalchemy/dialects/mysql/oursql.py +++ b/lib/sqlalchemy/dialects/mysql/oursql.py @@ -1,5 +1,5 @@ -# mysql/oursql.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/mysql/oursql.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/provision.py b/lib/sqlalchemy/dialects/mysql/provision.py index 86aaa94d94f..b8c6cd5d0e5 100644 --- a/lib/sqlalchemy/dialects/mysql/provision.py +++ b/lib/sqlalchemy/dialects/mysql/provision.py @@ -1,3 +1,9 @@ +# dialects/mysql/provision.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from ... 
import exc from ...testing.provision import configure_follower from ...testing.provision import create_db diff --git a/lib/sqlalchemy/dialects/mysql/pymysql.py b/lib/sqlalchemy/dialects/mysql/pymysql.py index a08418096be..951e21dc056 100644 --- a/lib/sqlalchemy/dialects/mysql/pymysql.py +++ b/lib/sqlalchemy/dialects/mysql/pymysql.py @@ -1,5 +1,5 @@ -# mysql/pymysql.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/mysql/pymysql.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/pyodbc.py b/lib/sqlalchemy/dialects/mysql/pyodbc.py index 9703b82e2f6..f09668bc3f8 100644 --- a/lib/sqlalchemy/dialects/mysql/pyodbc.py +++ b/lib/sqlalchemy/dialects/mysql/pyodbc.py @@ -1,5 +1,5 @@ -# mysql/pyodbc.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/mysql/pyodbc.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/reflection.py b/lib/sqlalchemy/dialects/mysql/reflection.py index 7a4a46b3475..b8b21c1ba4a 100644 --- a/lib/sqlalchemy/dialects/mysql/reflection.py +++ b/lib/sqlalchemy/dialects/mysql/reflection.py @@ -1,5 +1,5 @@ -# mysql/reflection.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/mysql/reflection.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/reserved_words.py b/lib/sqlalchemy/dialects/mysql/reserved_words.py index 8a9198e5fac..ecded855828 100644 --- a/lib/sqlalchemy/dialects/mysql/reserved_words.py +++ b/lib/sqlalchemy/dialects/mysql/reserved_words.py @@ -1,5 +1,5 @@ -# mysql/reserved_words.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/mysql/reserved_words.py +# 
Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/types.py b/lib/sqlalchemy/dialects/mysql/types.py index b7ba17772ab..a8a6042f897 100644 --- a/lib/sqlalchemy/dialects/mysql/types.py +++ b/lib/sqlalchemy/dialects/mysql/types.py @@ -1,5 +1,5 @@ -# mysql/types.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/mysql/types.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/oracle/__init__.py b/lib/sqlalchemy/dialects/oracle/__init__.py index 7ad817f29ba..6ffeb962546 100644 --- a/lib/sqlalchemy/dialects/oracle/__init__.py +++ b/lib/sqlalchemy/dialects/oracle/__init__.py @@ -1,5 +1,5 @@ -# oracle/__init__.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/oracle/__init__.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py index 2e49b202c6d..2f64b9dfdae 100644 --- a/lib/sqlalchemy/dialects/oracle/base.py +++ b/lib/sqlalchemy/dialects/oracle/base.py @@ -1,5 +1,5 @@ -# oracle/base.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/oracle/base.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/lib/sqlalchemy/dialects/oracle/cx_oracle.py index 0f4befe415a..9b0f464b2a9 100644 --- a/lib/sqlalchemy/dialects/oracle/cx_oracle.py +++ b/lib/sqlalchemy/dialects/oracle/cx_oracle.py @@ -1,4 +1,5 @@ -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/oracle/cx_oracle.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and 
contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/oracle/provision.py b/lib/sqlalchemy/dialects/oracle/provision.py index 74ad1f2a4b1..d517abec101 100644 --- a/lib/sqlalchemy/dialects/oracle/provision.py +++ b/lib/sqlalchemy/dialects/oracle/provision.py @@ -1,3 +1,9 @@ +# dialects/oracle/provision.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from ... import create_engine from ... import exc from ...engine import url as sa_url diff --git a/lib/sqlalchemy/dialects/postgresql/__init__.py b/lib/sqlalchemy/dialects/postgresql/__init__.py index 7fb791edb39..470f6cadb0f 100644 --- a/lib/sqlalchemy/dialects/postgresql/__init__.py +++ b/lib/sqlalchemy/dialects/postgresql/__init__.py @@ -1,5 +1,5 @@ -# postgresql/__init__.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/postgresql/__init__.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/array.py b/lib/sqlalchemy/dialects/postgresql/array.py index 9f8cc39027a..a401d234b8e 100644 --- a/lib/sqlalchemy/dialects/postgresql/array.py +++ b/lib/sqlalchemy/dialects/postgresql/array.py @@ -1,5 +1,5 @@ -# postgresql/array.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/postgresql/array.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/asyncpg.py b/lib/sqlalchemy/dialects/postgresql/asyncpg.py index daf26a0e509..84e2998a4a6 100644 --- a/lib/sqlalchemy/dialects/postgresql/asyncpg.py +++ b/lib/sqlalchemy/dialects/postgresql/asyncpg.py @@ -1,5 +1,5 @@ -# postgresql/asyncpg.py -# Copyright (C) 
2005-2023 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index f4f0d3a62e3..6b3af4bdae6 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -1,5 +1,5 @@ -# postgresql/base.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/postgresql/base.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/dml.py b/lib/sqlalchemy/dialects/postgresql/dml.py index b8d6d66729c..e9802f11990 100644 --- a/lib/sqlalchemy/dialects/postgresql/dml.py +++ b/lib/sqlalchemy/dialects/postgresql/dml.py @@ -1,5 +1,5 @@ -# postgresql/on_conflict.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/postgresql/dml.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/ext.py b/lib/sqlalchemy/dialects/postgresql/ext.py index eb485fe7925..47ed99f9b87 100644 --- a/lib/sqlalchemy/dialects/postgresql/ext.py +++ b/lib/sqlalchemy/dialects/postgresql/ext.py @@ -1,5 +1,5 @@ -# postgresql/ext.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/postgresql/ext.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/hstore.py b/lib/sqlalchemy/dialects/postgresql/hstore.py index 65b93753da0..8d8de550a02 100644 --- a/lib/sqlalchemy/dialects/postgresql/hstore.py +++ b/lib/sqlalchemy/dialects/postgresql/hstore.py @@ -1,5 +1,5 @@ -# postgresql/hstore.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/postgresql/hstore.py +# 
Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/json.py b/lib/sqlalchemy/dialects/postgresql/json.py index 534d8711180..e6b6f58677e 100644 --- a/lib/sqlalchemy/dialects/postgresql/json.py +++ b/lib/sqlalchemy/dialects/postgresql/json.py @@ -1,5 +1,5 @@ -# postgresql/json.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/postgresql/json.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/pg8000.py b/lib/sqlalchemy/dialects/postgresql/pg8000.py index 6a3e67adc2a..09b58f83533 100644 --- a/lib/sqlalchemy/dialects/postgresql/pg8000.py +++ b/lib/sqlalchemy/dialects/postgresql/pg8000.py @@ -1,5 +1,5 @@ -# postgresql/pg8000.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/provision.py b/lib/sqlalchemy/dialects/postgresql/provision.py index 98470f36eb9..bc69c0f6197 100644 --- a/lib/sqlalchemy/dialects/postgresql/provision.py +++ b/lib/sqlalchemy/dialects/postgresql/provision.py @@ -1,3 +1,9 @@ +# dialects/postgresql/provision.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php import time from ... 
import exc diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py index d26b649f789..80033d0d7ed 100644 --- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py +++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py @@ -1,5 +1,5 @@ -# postgresql/psycopg2.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/postgresql/psycopg2.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py b/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py index a169dd9c777..7483d3b5291 100644 --- a/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py +++ b/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py @@ -1,5 +1,5 @@ -# testing/engines.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/postgresql/psycopg2cffi.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/pygresql.py b/lib/sqlalchemy/dialects/postgresql/pygresql.py index 6bae6a4f11f..d3b7df9688f 100644 --- a/lib/sqlalchemy/dialects/postgresql/pygresql.py +++ b/lib/sqlalchemy/dialects/postgresql/pygresql.py @@ -1,5 +1,5 @@ -# postgresql/pygresql.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/postgresql/pygresql.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/pypostgresql.py b/lib/sqlalchemy/dialects/postgresql/pypostgresql.py index aa5a0e0d7d8..f152b4a2489 100644 --- a/lib/sqlalchemy/dialects/postgresql/pypostgresql.py +++ b/lib/sqlalchemy/dialects/postgresql/pypostgresql.py @@ -1,5 +1,5 @@ -# postgresql/pypostgresql.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# 
dialects/postgresql/pypostgresql.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/ranges.py b/lib/sqlalchemy/dialects/postgresql/ranges.py index 667b986c32d..800ff274f89 100644 --- a/lib/sqlalchemy/dialects/postgresql/ranges.py +++ b/lib/sqlalchemy/dialects/postgresql/ranges.py @@ -1,4 +1,5 @@ -# Copyright (C) 2013-2023 the SQLAlchemy authors and contributors +# dialects/postgresql/ranges.py +# Copyright (C) 2013-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/__init__.py b/lib/sqlalchemy/dialects/sqlite/__init__.py index dba10f89f54..8bde524f1ea 100644 --- a/lib/sqlalchemy/dialects/sqlite/__init__.py +++ b/lib/sqlalchemy/dialects/sqlite/__init__.py @@ -1,5 +1,5 @@ -# sqlite/__init__.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/sqlite/__init__.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/aiosqlite.py b/lib/sqlalchemy/dialects/sqlite/aiosqlite.py index b5ec6e27d94..e51ca9573d6 100644 --- a/lib/sqlalchemy/dialects/sqlite/aiosqlite.py +++ b/lib/sqlalchemy/dialects/sqlite/aiosqlite.py @@ -1,5 +1,5 @@ -# sqlite/aiosqlite.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/sqlite/aiosqlite.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index ea91a322807..bcf38edc729 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -1,5 +1,5 @@ -# sqlite/base.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# 
dialects/sqlite/base.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/dml.py b/lib/sqlalchemy/dialects/sqlite/dml.py index b78caf277ec..7263b6a75f5 100644 --- a/lib/sqlalchemy/dialects/sqlite/dml.py +++ b/lib/sqlalchemy/dialects/sqlite/dml.py @@ -1,4 +1,5 @@ -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/sqlite/dml.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/json.py b/lib/sqlalchemy/dialects/sqlite/json.py index 614f95405ff..32008e94ab2 100644 --- a/lib/sqlalchemy/dialects/sqlite/json.py +++ b/lib/sqlalchemy/dialects/sqlite/json.py @@ -1,3 +1,9 @@ +# dialects/sqlite/json.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from ... 
import types as sqltypes diff --git a/lib/sqlalchemy/dialects/sqlite/provision.py b/lib/sqlalchemy/dialects/sqlite/provision.py index e5b17e8294f..764ffacd603 100644 --- a/lib/sqlalchemy/dialects/sqlite/provision.py +++ b/lib/sqlalchemy/dialects/sqlite/provision.py @@ -1,3 +1,9 @@ +# dialects/sqlite/provision.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php import os import re diff --git a/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py b/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py index d99113f3011..1513356b942 100644 --- a/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py +++ b/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py @@ -1,5 +1,5 @@ -# sqlite/pysqlcipher.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/sqlite/pysqlcipher.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/pysqlite.py b/lib/sqlalchemy/dialects/sqlite/pysqlite.py index 0c750f1e165..f3de9b1bcb8 100644 --- a/lib/sqlalchemy/dialects/sqlite/pysqlite.py +++ b/lib/sqlalchemy/dialects/sqlite/pysqlite.py @@ -1,5 +1,5 @@ -# sqlite/pysqlite.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/sqlite/pysqlite.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sybase/__init__.py b/lib/sqlalchemy/dialects/sybase/__init__.py index 92cda9de132..f41fa0b65a9 100644 --- a/lib/sqlalchemy/dialects/sybase/__init__.py +++ b/lib/sqlalchemy/dialects/sybase/__init__.py @@ -1,5 +1,5 @@ -# sybase/__init__.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/sybase/__init__.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors 
# # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sybase/base.py b/lib/sqlalchemy/dialects/sybase/base.py index f2056a22ced..4d94b3d4a53 100644 --- a/lib/sqlalchemy/dialects/sybase/base.py +++ b/lib/sqlalchemy/dialects/sybase/base.py @@ -1,5 +1,5 @@ -# sybase/base.py -# Copyright (C) 2010-2023 the SQLAlchemy authors and contributors +# dialects/sybase/base.py +# Copyright (C) 2010-2024 the SQLAlchemy authors and contributors # # get_select_precolumns(), limit_clause() implementation # copyright (C) 2007 Fisch Asset Management diff --git a/lib/sqlalchemy/dialects/sybase/mxodbc.py b/lib/sqlalchemy/dialects/sybase/mxodbc.py index deae27e3eac..19d0d464885 100644 --- a/lib/sqlalchemy/dialects/sybase/mxodbc.py +++ b/lib/sqlalchemy/dialects/sybase/mxodbc.py @@ -1,5 +1,5 @@ -# sybase/mxodbc.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/sybase/mxodbc.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sybase/pyodbc.py b/lib/sqlalchemy/dialects/sybase/pyodbc.py index 43a48fe3a57..295bac25557 100644 --- a/lib/sqlalchemy/dialects/sybase/pyodbc.py +++ b/lib/sqlalchemy/dialects/sybase/pyodbc.py @@ -1,5 +1,5 @@ -# sybase/pyodbc.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# dialects/sybase/pyodbc.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sybase/pysybase.py b/lib/sqlalchemy/dialects/sybase/pysybase.py index 2778f598638..140d68f4c89 100644 --- a/lib/sqlalchemy/dialects/sybase/pysybase.py +++ b/lib/sqlalchemy/dialects/sybase/pysybase.py @@ -1,5 +1,5 @@ -# sybase/pysybase.py -# Copyright (C) 2010-2023 the SQLAlchemy authors and contributors +# dialects/sybase/pysybase.py +# Copyright (C) 2010-2024 the SQLAlchemy authors and 
contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/__init__.py b/lib/sqlalchemy/engine/__init__.py index ec699b0ec46..6e92ba201d9 100644 --- a/lib/sqlalchemy/engine/__init__.py +++ b/lib/sqlalchemy/engine/__init__.py @@ -1,5 +1,5 @@ # engine/__init__.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index 53916cc1233..68915259e8d 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -1,5 +1,5 @@ # engine/base.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/characteristics.py b/lib/sqlalchemy/engine/characteristics.py index c00bff40d03..5cd3daaa2e1 100644 --- a/lib/sqlalchemy/engine/characteristics.py +++ b/lib/sqlalchemy/engine/characteristics.py @@ -1,3 +1,9 @@ +# engine/characteristics.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php import abc from ..util import ABC diff --git a/lib/sqlalchemy/engine/create.py b/lib/sqlalchemy/engine/create.py index 16c75fc217d..239bd486022 100644 --- a/lib/sqlalchemy/engine/create.py +++ b/lib/sqlalchemy/engine/create.py @@ -1,5 +1,5 @@ # engine/create.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/cursor.py b/lib/sqlalchemy/engine/cursor.py index 02f6d5a0ac1..9329ce00e9e 100644 --- 
a/lib/sqlalchemy/engine/cursor.py +++ b/lib/sqlalchemy/engine/cursor.py @@ -1,5 +1,5 @@ # engine/cursor.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/default.py b/lib/sqlalchemy/engine/default.py index 35b6c31ce21..c93fd271405 100644 --- a/lib/sqlalchemy/engine/default.py +++ b/lib/sqlalchemy/engine/default.py @@ -1,5 +1,5 @@ # engine/default.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/events.py b/lib/sqlalchemy/engine/events.py index 7343582317d..544e5f394c0 100644 --- a/lib/sqlalchemy/engine/events.py +++ b/lib/sqlalchemy/engine/events.py @@ -1,5 +1,5 @@ -# sqlalchemy/engine/events.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# engine/events.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/interfaces.py b/lib/sqlalchemy/engine/interfaces.py index de78ad920ae..0363412597a 100644 --- a/lib/sqlalchemy/engine/interfaces.py +++ b/lib/sqlalchemy/engine/interfaces.py @@ -1,5 +1,5 @@ # engine/interfaces.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/mock.py b/lib/sqlalchemy/engine/mock.py index b9ef04097cb..6c7c908b662 100644 --- a/lib/sqlalchemy/engine/mock.py +++ b/lib/sqlalchemy/engine/mock.py @@ -1,5 +1,5 @@ # engine/mock.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This 
module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/reflection.py b/lib/sqlalchemy/engine/reflection.py index 64b4bba6a74..38768c9c0d5 100644 --- a/lib/sqlalchemy/engine/reflection.py +++ b/lib/sqlalchemy/engine/reflection.py @@ -1,5 +1,5 @@ # engine/reflection.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py index 116245670b3..818e3068d28 100644 --- a/lib/sqlalchemy/engine/result.py +++ b/lib/sqlalchemy/engine/result.py @@ -1,5 +1,5 @@ # engine/result.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/row.py b/lib/sqlalchemy/engine/row.py index 50577ffe8d7..cdc0c850642 100644 --- a/lib/sqlalchemy/engine/row.py +++ b/lib/sqlalchemy/engine/row.py @@ -1,5 +1,5 @@ # engine/row.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/strategies.py b/lib/sqlalchemy/engine/strategies.py index 335ec45899d..9c04483707e 100644 --- a/lib/sqlalchemy/engine/strategies.py +++ b/lib/sqlalchemy/engine/strategies.py @@ -1,5 +1,5 @@ # engine/strategies.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/url.py b/lib/sqlalchemy/engine/url.py index 2defd37aae2..5b12e358bda 100644 --- a/lib/sqlalchemy/engine/url.py +++ b/lib/sqlalchemy/engine/url.py @@ -1,5 +1,5 @@ # engine/url.py -# Copyright 
(C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/util.py b/lib/sqlalchemy/engine/util.py index be1850dba61..f118f6641e2 100644 --- a/lib/sqlalchemy/engine/util.py +++ b/lib/sqlalchemy/engine/util.py @@ -1,5 +1,5 @@ # engine/util.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/__init__.py b/lib/sqlalchemy/event/__init__.py index cd5540b0d26..ed5e121b607 100644 --- a/lib/sqlalchemy/event/__init__.py +++ b/lib/sqlalchemy/event/__init__.py @@ -1,5 +1,5 @@ # event/__init__.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/api.py b/lib/sqlalchemy/event/api.py index 440d5d1bd38..7855778654b 100644 --- a/lib/sqlalchemy/event/api.py +++ b/lib/sqlalchemy/event/api.py @@ -1,5 +1,5 @@ # event/api.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/attr.py b/lib/sqlalchemy/event/attr.py index 5a85cb91ee2..bc4321b8c92 100644 --- a/lib/sqlalchemy/event/attr.py +++ b/lib/sqlalchemy/event/attr.py @@ -1,5 +1,5 @@ # event/attr.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/base.py b/lib/sqlalchemy/event/base.py index 57d481dbd9c..d8282cebba1 100644 --- a/lib/sqlalchemy/event/base.py +++ 
b/lib/sqlalchemy/event/base.py @@ -1,5 +1,5 @@ # event/base.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/legacy.py b/lib/sqlalchemy/event/legacy.py index d2f1fda13ef..0416980ffc9 100644 --- a/lib/sqlalchemy/event/legacy.py +++ b/lib/sqlalchemy/event/legacy.py @@ -1,5 +1,5 @@ # event/legacy.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/registry.py b/lib/sqlalchemy/event/registry.py index 6bc38a3191e..b306560dcd9 100644 --- a/lib/sqlalchemy/event/registry.py +++ b/lib/sqlalchemy/event/registry.py @@ -1,5 +1,5 @@ # event/registry.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/events.py b/lib/sqlalchemy/events.py index 3a844647fd8..aafbde6ba72 100644 --- a/lib/sqlalchemy/events.py +++ b/lib/sqlalchemy/events.py @@ -1,5 +1,5 @@ -# sqlalchemy/events.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# events.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/exc.py b/lib/sqlalchemy/exc.py index 20afe55bc81..5ad06faec6f 100644 --- a/lib/sqlalchemy/exc.py +++ b/lib/sqlalchemy/exc.py @@ -1,5 +1,5 @@ -# sqlalchemy/exc.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# exc.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/__init__.py b/lib/sqlalchemy/ext/__init__.py index 
e3af738b7ce..f03ed945f35 100644 --- a/lib/sqlalchemy/ext/__init__.py +++ b/lib/sqlalchemy/ext/__init__.py @@ -1,5 +1,5 @@ # ext/__init__.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/associationproxy.py b/lib/sqlalchemy/ext/associationproxy.py index 72c9afde675..3b284624fab 100644 --- a/lib/sqlalchemy/ext/associationproxy.py +++ b/lib/sqlalchemy/ext/associationproxy.py @@ -1,5 +1,5 @@ # ext/associationproxy.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/__init__.py b/lib/sqlalchemy/ext/asyncio/__init__.py index 11539f4e6d1..2ff1c949b2b 100644 --- a/lib/sqlalchemy/ext/asyncio/__init__.py +++ b/lib/sqlalchemy/ext/asyncio/__init__.py @@ -1,5 +1,5 @@ # ext/asyncio/__init__.py -# Copyright (C) 2020-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/base.py b/lib/sqlalchemy/ext/asyncio/base.py index bdff1019bd7..610cc1be787 100644 --- a/lib/sqlalchemy/ext/asyncio/base.py +++ b/lib/sqlalchemy/ext/asyncio/base.py @@ -1,5 +1,5 @@ # ext/asyncio/base.py -# Copyright (C) 2020-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/engine.py b/lib/sqlalchemy/ext/asyncio/engine.py index bc4956be9c4..a902d9dc3b4 100644 --- a/lib/sqlalchemy/ext/asyncio/engine.py +++ b/lib/sqlalchemy/ext/asyncio/engine.py @@ -1,5 +1,5 @@ # ext/asyncio/engine.py -# Copyright (C) 2020-2023 the SQLAlchemy authors and 
contributors +# Copyright (C) 2020-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/events.py b/lib/sqlalchemy/ext/asyncio/events.py index f425922db74..1b0e3fc5ad8 100644 --- a/lib/sqlalchemy/ext/asyncio/events.py +++ b/lib/sqlalchemy/ext/asyncio/events.py @@ -1,5 +1,5 @@ # ext/asyncio/events.py -# Copyright (C) 2020-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/exc.py b/lib/sqlalchemy/ext/asyncio/exc.py index 3f937679b93..1cf6f363860 100644 --- a/lib/sqlalchemy/ext/asyncio/exc.py +++ b/lib/sqlalchemy/ext/asyncio/exc.py @@ -1,5 +1,5 @@ # ext/asyncio/exc.py -# Copyright (C) 2020-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/result.py b/lib/sqlalchemy/ext/asyncio/result.py index 31c844698ab..ef8bd8cccd7 100644 --- a/lib/sqlalchemy/ext/asyncio/result.py +++ b/lib/sqlalchemy/ext/asyncio/result.py @@ -1,5 +1,5 @@ # ext/asyncio/result.py -# Copyright (C) 2020-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/scoping.py b/lib/sqlalchemy/ext/asyncio/scoping.py index 4a7d80987c9..6ecd5827e8b 100644 --- a/lib/sqlalchemy/ext/asyncio/scoping.py +++ b/lib/sqlalchemy/ext/asyncio/scoping.py @@ -1,5 +1,5 @@ # ext/asyncio/scoping.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/session.py 
b/lib/sqlalchemy/ext/asyncio/session.py index 5238b8d1eb5..6a12f9e5549 100644 --- a/lib/sqlalchemy/ext/asyncio/session.py +++ b/lib/sqlalchemy/ext/asyncio/session.py @@ -1,5 +1,5 @@ # ext/asyncio/session.py -# Copyright (C) 2020-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/automap.py b/lib/sqlalchemy/ext/automap.py index 6990a12effa..0c434dc266f 100644 --- a/lib/sqlalchemy/ext/automap.py +++ b/lib/sqlalchemy/ext/automap.py @@ -1,5 +1,5 @@ # ext/automap.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/baked.py b/lib/sqlalchemy/ext/baked.py index 871ead2af24..7d68c3b1802 100644 --- a/lib/sqlalchemy/ext/baked.py +++ b/lib/sqlalchemy/ext/baked.py @@ -1,5 +1,5 @@ -# sqlalchemy/ext/baked.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# ext/baked.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/compiler.py b/lib/sqlalchemy/ext/compiler.py index 79608b9c825..5b3b00ff258 100644 --- a/lib/sqlalchemy/ext/compiler.py +++ b/lib/sqlalchemy/ext/compiler.py @@ -1,5 +1,5 @@ # ext/compiler.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/declarative/__init__.py b/lib/sqlalchemy/ext/declarative/__init__.py index 7f435bcae1b..f89c9219bcf 100644 --- a/lib/sqlalchemy/ext/declarative/__init__.py +++ b/lib/sqlalchemy/ext/declarative/__init__.py @@ -1,5 +1,5 @@ # ext/declarative/__init__.py -# Copyright (C) 2005-2023 the SQLAlchemy 
authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/declarative/extensions.py b/lib/sqlalchemy/ext/declarative/extensions.py index ca425501fd3..125a335aeed 100644 --- a/lib/sqlalchemy/ext/declarative/extensions.py +++ b/lib/sqlalchemy/ext/declarative/extensions.py @@ -1,5 +1,5 @@ # ext/declarative/extensions.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/horizontal_shard.py b/lib/sqlalchemy/ext/horizontal_shard.py index 9a6963127e2..f66f78a9109 100644 --- a/lib/sqlalchemy/ext/horizontal_shard.py +++ b/lib/sqlalchemy/ext/horizontal_shard.py @@ -1,5 +1,5 @@ # ext/horizontal_shard.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/hybrid.py b/lib/sqlalchemy/ext/hybrid.py index ab87d454db0..2e0cbb815dc 100644 --- a/lib/sqlalchemy/ext/hybrid.py +++ b/lib/sqlalchemy/ext/hybrid.py @@ -1,5 +1,5 @@ # ext/hybrid.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/indexable.py b/lib/sqlalchemy/ext/indexable.py index 4ca8ac54873..1906dc71ceb 100644 --- a/lib/sqlalchemy/ext/indexable.py +++ b/lib/sqlalchemy/ext/indexable.py @@ -1,5 +1,5 @@ -# ext/index.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# ext/indexable.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git 
a/lib/sqlalchemy/ext/instrumentation.py b/lib/sqlalchemy/ext/instrumentation.py index 54f3e64c5d5..bfca24f243f 100644 --- a/lib/sqlalchemy/ext/instrumentation.py +++ b/lib/sqlalchemy/ext/instrumentation.py @@ -1,3 +1,9 @@ +# ext/instrumentation.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php """Extensible class instrumentation. The :mod:`sqlalchemy.ext.instrumentation` package provides for alternate diff --git a/lib/sqlalchemy/ext/mutable.py b/lib/sqlalchemy/ext/mutable.py index 0eac660488f..968d48e1b81 100644 --- a/lib/sqlalchemy/ext/mutable.py +++ b/lib/sqlalchemy/ext/mutable.py @@ -1,5 +1,5 @@ # ext/mutable.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/mypy/__init__.py b/lib/sqlalchemy/ext/mypy/__init__.py index e69de29bb2d..de2c02ee9f1 100644 --- a/lib/sqlalchemy/ext/mypy/__init__.py +++ b/lib/sqlalchemy/ext/mypy/__init__.py @@ -0,0 +1,6 @@ +# ext/mypy/__init__.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php diff --git a/lib/sqlalchemy/ext/mypy/apply.py b/lib/sqlalchemy/ext/mypy/apply.py index ad81c15b1d8..8136737b91b 100644 --- a/lib/sqlalchemy/ext/mypy/apply.py +++ b/lib/sqlalchemy/ext/mypy/apply.py @@ -1,4 +1,10 @@ # ext/mypy/apply.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + # Copyright (C) 2021 the SQLAlchemy authors and contributors # # diff --git a/lib/sqlalchemy/ext/mypy/decl_class.py 
b/lib/sqlalchemy/ext/mypy/decl_class.py index c33c30e2574..b3820cae131 100644 --- a/lib/sqlalchemy/ext/mypy/decl_class.py +++ b/lib/sqlalchemy/ext/mypy/decl_class.py @@ -1,4 +1,10 @@ # ext/mypy/decl_class.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + # Copyright (C) 2021 the SQLAlchemy authors and contributors # # diff --git a/lib/sqlalchemy/ext/mypy/infer.py b/lib/sqlalchemy/ext/mypy/infer.py index f3f44a42504..2543cd5704d 100644 --- a/lib/sqlalchemy/ext/mypy/infer.py +++ b/lib/sqlalchemy/ext/mypy/infer.py @@ -1,4 +1,10 @@ # ext/mypy/infer.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + # Copyright (C) 2021 the SQLAlchemy authors and contributors # # diff --git a/lib/sqlalchemy/ext/mypy/names.py b/lib/sqlalchemy/ext/mypy/names.py index 8ec15a6d43a..3db240cd809 100644 --- a/lib/sqlalchemy/ext/mypy/names.py +++ b/lib/sqlalchemy/ext/mypy/names.py @@ -1,4 +1,10 @@ # ext/mypy/names.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + # Copyright (C) 2021 the SQLAlchemy authors and contributors # # diff --git a/lib/sqlalchemy/ext/mypy/plugin.py b/lib/sqlalchemy/ext/mypy/plugin.py index bd2dd79d62a..5e18aec695f 100644 --- a/lib/sqlalchemy/ext/mypy/plugin.py +++ b/lib/sqlalchemy/ext/mypy/plugin.py @@ -1,4 +1,10 @@ # ext/mypy/plugin.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php + # Copyright (C) 2021 the SQLAlchemy authors and contributors # # 
diff --git a/lib/sqlalchemy/ext/mypy/util.py b/lib/sqlalchemy/ext/mypy/util.py index 373fd4bfbc4..30df8332c54 100644 --- a/lib/sqlalchemy/ext/mypy/util.py +++ b/lib/sqlalchemy/ext/mypy/util.py @@ -1,3 +1,9 @@ +# ext/mypy/util.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php import re from typing import Any from typing import Iterable diff --git a/lib/sqlalchemy/ext/orderinglist.py b/lib/sqlalchemy/ext/orderinglist.py index b13576b5da2..0af05f6de75 100644 --- a/lib/sqlalchemy/ext/orderinglist.py +++ b/lib/sqlalchemy/ext/orderinglist.py @@ -1,5 +1,5 @@ # ext/orderinglist.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/serializer.py b/lib/sqlalchemy/ext/serializer.py index 4d66723361c..987ec1f544d 100644 --- a/lib/sqlalchemy/ext/serializer.py +++ b/lib/sqlalchemy/ext/serializer.py @@ -1,5 +1,5 @@ # ext/serializer.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/future/__init__.py b/lib/sqlalchemy/future/__init__.py index 1472c8226a4..2a81152be5d 100644 --- a/lib/sqlalchemy/future/__init__.py +++ b/lib/sqlalchemy/future/__init__.py @@ -1,5 +1,5 @@ -# sql/future/__init__.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# future/__init__.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/future/engine.py b/lib/sqlalchemy/future/engine.py index 3235529f736..1aa2beb7b80 100644 --- a/lib/sqlalchemy/future/engine.py +++ 
b/lib/sqlalchemy/future/engine.py @@ -1,3 +1,9 @@ +# future/engine.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from .. import util from ..engine import Connection as _LegacyConnection from ..engine import create_engine as _create_engine diff --git a/lib/sqlalchemy/future/orm/__init__.py b/lib/sqlalchemy/future/orm/__init__.py index 674dd448cdc..501ff2ed2eb 100644 --- a/lib/sqlalchemy/future/orm/__init__.py +++ b/lib/sqlalchemy/future/orm/__init__.py @@ -1,5 +1,5 @@ -# sql/future/orm/__init__.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# future/orm/__init__.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/inspection.py b/lib/sqlalchemy/inspection.py index 1f377b46abc..9e70cb8d271 100644 --- a/lib/sqlalchemy/inspection.py +++ b/lib/sqlalchemy/inspection.py @@ -1,5 +1,5 @@ -# sqlalchemy/inspect.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# inspection.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/log.py b/lib/sqlalchemy/log.py index 7f90aeb32f3..1183636dc8e 100644 --- a/lib/sqlalchemy/log.py +++ b/lib/sqlalchemy/log.py @@ -1,5 +1,5 @@ -# sqlalchemy/log.py -# Copyright (C) 2006-2023 the SQLAlchemy authors and contributors +# log.py +# Copyright (C) 2006-2024 the SQLAlchemy authors and contributors # # Includes alterations by Vinay Sajip vinay_sajip@yahoo.co.uk # diff --git a/lib/sqlalchemy/orm/__init__.py b/lib/sqlalchemy/orm/__init__.py index 3e01c6506b3..85da4ea9681 100644 --- a/lib/sqlalchemy/orm/__init__.py +++ b/lib/sqlalchemy/orm/__init__.py @@ -1,5 +1,5 @@ # orm/__init__.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and 
contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/attributes.py b/lib/sqlalchemy/orm/attributes.py index 2e82851a23f..be8cd41c3d3 100644 --- a/lib/sqlalchemy/orm/attributes.py +++ b/lib/sqlalchemy/orm/attributes.py @@ -1,5 +1,5 @@ # orm/attributes.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/base.py b/lib/sqlalchemy/orm/base.py index 2cfe022b808..3be8916e0b1 100644 --- a/lib/sqlalchemy/orm/base.py +++ b/lib/sqlalchemy/orm/base.py @@ -1,5 +1,5 @@ # orm/base.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/clsregistry.py b/lib/sqlalchemy/orm/clsregistry.py index 36a42f3e35b..0a09ccd8852 100644 --- a/lib/sqlalchemy/orm/clsregistry.py +++ b/lib/sqlalchemy/orm/clsregistry.py @@ -1,5 +1,5 @@ -# ext/declarative/clsregistry.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# orm/clsregistry.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/collections.py b/lib/sqlalchemy/orm/collections.py index bb2aed4f788..5e7e3586983 100644 --- a/lib/sqlalchemy/orm/collections.py +++ b/lib/sqlalchemy/orm/collections.py @@ -1,5 +1,5 @@ # orm/collections.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/context.py b/lib/sqlalchemy/orm/context.py index a254b08bd71..1f663c157ff 100644 --- 
a/lib/sqlalchemy/orm/context.py +++ b/lib/sqlalchemy/orm/context.py @@ -1,5 +1,5 @@ # orm/context.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/decl_api.py b/lib/sqlalchemy/orm/decl_api.py index 3a9fae8b1c0..25d015aa20f 100644 --- a/lib/sqlalchemy/orm/decl_api.py +++ b/lib/sqlalchemy/orm/decl_api.py @@ -1,5 +1,5 @@ -# ext/declarative/api.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# orm/decl_api.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/decl_base.py b/lib/sqlalchemy/orm/decl_base.py index dd2c38d4583..fe2131690af 100644 --- a/lib/sqlalchemy/orm/decl_base.py +++ b/lib/sqlalchemy/orm/decl_base.py @@ -1,5 +1,5 @@ -# ext/declarative/base.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# orm/decl_base.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/dependency.py b/lib/sqlalchemy/orm/dependency.py index 911c3edf43b..702a27ac8b4 100644 --- a/lib/sqlalchemy/orm/dependency.py +++ b/lib/sqlalchemy/orm/dependency.py @@ -1,5 +1,5 @@ # orm/dependency.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/descriptor_props.py b/lib/sqlalchemy/orm/descriptor_props.py index c7e2d984f8a..5045015ad32 100644 --- a/lib/sqlalchemy/orm/descriptor_props.py +++ b/lib/sqlalchemy/orm/descriptor_props.py @@ -1,5 +1,5 @@ # orm/descriptor_props.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy 
authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/dynamic.py b/lib/sqlalchemy/orm/dynamic.py index e19701b02eb..5d5ce3642cb 100644 --- a/lib/sqlalchemy/orm/dynamic.py +++ b/lib/sqlalchemy/orm/dynamic.py @@ -1,5 +1,5 @@ # orm/dynamic.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/evaluator.py b/lib/sqlalchemy/orm/evaluator.py index 670ab28479e..9fa2d4818c1 100644 --- a/lib/sqlalchemy/orm/evaluator.py +++ b/lib/sqlalchemy/orm/evaluator.py @@ -1,5 +1,5 @@ # orm/evaluator.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/events.py b/lib/sqlalchemy/orm/events.py index 19630ef2820..6e777f08e8d 100644 --- a/lib/sqlalchemy/orm/events.py +++ b/lib/sqlalchemy/orm/events.py @@ -1,5 +1,5 @@ # orm/events.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/exc.py b/lib/sqlalchemy/orm/exc.py index 0c01d154195..9ef29a45361 100644 --- a/lib/sqlalchemy/orm/exc.py +++ b/lib/sqlalchemy/orm/exc.py @@ -1,5 +1,5 @@ # orm/exc.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/identity.py b/lib/sqlalchemy/orm/identity.py index 695b05064f6..419e8471e37 100644 --- a/lib/sqlalchemy/orm/identity.py +++ b/lib/sqlalchemy/orm/identity.py @@ -1,5 +1,5 @@ # orm/identity.py -# Copyright (C) 2005-2023 the SQLAlchemy 
authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/instrumentation.py b/lib/sqlalchemy/orm/instrumentation.py index ce9809a1142..d8d7e1c914c 100644 --- a/lib/sqlalchemy/orm/instrumentation.py +++ b/lib/sqlalchemy/orm/instrumentation.py @@ -1,5 +1,5 @@ # orm/instrumentation.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/interfaces.py b/lib/sqlalchemy/orm/interfaces.py index 51d642d0866..5237c04b680 100644 --- a/lib/sqlalchemy/orm/interfaces.py +++ b/lib/sqlalchemy/orm/interfaces.py @@ -1,5 +1,5 @@ # orm/interfaces.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/loading.py b/lib/sqlalchemy/orm/loading.py index 70fb731453c..3cec7a01245 100644 --- a/lib/sqlalchemy/orm/loading.py +++ b/lib/sqlalchemy/orm/loading.py @@ -1,5 +1,5 @@ # orm/loading.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index d74d708437e..ba668b8aedc 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -1,5 +1,5 @@ # orm/mapper.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/path_registry.py b/lib/sqlalchemy/orm/path_registry.py index dad1fd46c05..ab14c403c3d 100644 --- 
a/lib/sqlalchemy/orm/path_registry.py +++ b/lib/sqlalchemy/orm/path_registry.py @@ -1,5 +1,5 @@ # orm/path_registry.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index b473022528a..a582ca1d76f 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ b/lib/sqlalchemy/orm/persistence.py @@ -1,5 +1,5 @@ # orm/persistence.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/properties.py b/lib/sqlalchemy/orm/properties.py index e485e465da3..287bfdc1ded 100644 --- a/lib/sqlalchemy/orm/properties.py +++ b/lib/sqlalchemy/orm/properties.py @@ -1,5 +1,5 @@ # orm/properties.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index c7080f85b59..8ebc0216dba 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -1,5 +1,5 @@ # orm/query.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/relationships.py b/lib/sqlalchemy/orm/relationships.py index d19420ab856..2824e00e07c 100644 --- a/lib/sqlalchemy/orm/relationships.py +++ b/lib/sqlalchemy/orm/relationships.py @@ -1,5 +1,5 @@ # orm/relationships.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of 
SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/scoping.py b/lib/sqlalchemy/orm/scoping.py index 8631654921a..ccdb6503428 100644 --- a/lib/sqlalchemy/orm/scoping.py +++ b/lib/sqlalchemy/orm/scoping.py @@ -1,5 +1,5 @@ # orm/scoping.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index 5a7a8bb211f..3c1e5b4477d 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -1,5 +1,5 @@ # orm/session.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/state.py b/lib/sqlalchemy/orm/state.py index 2e0b06f5277..65daed3b1de 100644 --- a/lib/sqlalchemy/orm/state.py +++ b/lib/sqlalchemy/orm/state.py @@ -1,5 +1,5 @@ # orm/state.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index 770320794d0..dd9f8b87ae7 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -1,5 +1,5 @@ # orm/strategies.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/strategy_options.py b/lib/sqlalchemy/orm/strategy_options.py index 170847f42e8..ed3c897b373 100644 --- a/lib/sqlalchemy/orm/strategy_options.py +++ b/lib/sqlalchemy/orm/strategy_options.py @@ -1,4 +1,5 @@ -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors 
+# orm/strategy_options.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/sync.py b/lib/sqlalchemy/orm/sync.py index f92c80fb2a7..96bcd7262fe 100644 --- a/lib/sqlalchemy/orm/sync.py +++ b/lib/sqlalchemy/orm/sync.py @@ -1,5 +1,5 @@ # orm/sync.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/unitofwork.py b/lib/sqlalchemy/orm/unitofwork.py index a3854dd3cbb..9353282df5d 100644 --- a/lib/sqlalchemy/orm/unitofwork.py +++ b/lib/sqlalchemy/orm/unitofwork.py @@ -1,5 +1,5 @@ # orm/unitofwork.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index e8fa0731e2e..a296fc0c17d 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -1,5 +1,5 @@ # orm/util.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/pool/__init__.py b/lib/sqlalchemy/pool/__init__.py index 1a35b1b86c5..b3368b493b0 100644 --- a/lib/sqlalchemy/pool/__init__.py +++ b/lib/sqlalchemy/pool/__init__.py @@ -1,5 +1,5 @@ -# sqlalchemy/pool/__init__.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# pool/__init__.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/pool/base.py b/lib/sqlalchemy/pool/base.py index 9bcbc7ac2e6..f739e9d99c5 100644 --- a/lib/sqlalchemy/pool/base.py +++ 
b/lib/sqlalchemy/pool/base.py @@ -1,5 +1,5 @@ -# sqlalchemy/pool.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# pool/base.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/pool/dbapi_proxy.py b/lib/sqlalchemy/pool/dbapi_proxy.py index 8560bb7639b..7acd6afbaed 100644 --- a/lib/sqlalchemy/pool/dbapi_proxy.py +++ b/lib/sqlalchemy/pool/dbapi_proxy.py @@ -1,5 +1,5 @@ -# sqlalchemy/pool/dbapi_proxy.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# pool/dbapi_proxy.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/pool/events.py b/lib/sqlalchemy/pool/events.py index 898223c31f8..cdbfa5dc123 100644 --- a/lib/sqlalchemy/pool/events.py +++ b/lib/sqlalchemy/pool/events.py @@ -1,5 +1,5 @@ -# sqlalchemy/pool/events.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# pool/events.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/pool/impl.py b/lib/sqlalchemy/pool/impl.py index 0004b3fb291..e08d66404a7 100644 --- a/lib/sqlalchemy/pool/impl.py +++ b/lib/sqlalchemy/pool/impl.py @@ -1,5 +1,5 @@ -# sqlalchemy/pool.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# pool/impl.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/processors.py b/lib/sqlalchemy/processors.py index 6cbbb56a846..3efc24d577c 100644 --- a/lib/sqlalchemy/processors.py +++ b/lib/sqlalchemy/processors.py @@ -1,5 +1,5 @@ -# sqlalchemy/processors.py -# Copyright (C) 2010-2023 the SQLAlchemy authors and contributors +# processors.py +# Copyright (C) 2010-2024 the SQLAlchemy authors and 
contributors # # Copyright (C) 2010 Gaetan de Menten gdementen@gmail.com # diff --git a/lib/sqlalchemy/schema.py b/lib/sqlalchemy/schema.py index bbe7ef09eb1..dbea1b76615 100644 --- a/lib/sqlalchemy/schema.py +++ b/lib/sqlalchemy/schema.py @@ -1,5 +1,5 @@ # schema.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/__init__.py b/lib/sqlalchemy/sql/__init__.py index 70583a910b0..94f6d8d2387 100644 --- a/lib/sqlalchemy/sql/__init__.py +++ b/lib/sqlalchemy/sql/__init__.py @@ -1,5 +1,5 @@ # sql/__init__.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/annotation.py b/lib/sqlalchemy/sql/annotation.py index f98038d6a21..e9a8ffe8373 100644 --- a/lib/sqlalchemy/sql/annotation.py +++ b/lib/sqlalchemy/sql/annotation.py @@ -1,5 +1,5 @@ # sql/annotation.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/base.py b/lib/sqlalchemy/sql/base.py index 4300b4ef42f..c44c693c79b 100644 --- a/lib/sqlalchemy/sql/base.py +++ b/lib/sqlalchemy/sql/base.py @@ -1,5 +1,5 @@ # sql/base.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/coercions.py b/lib/sqlalchemy/sql/coercions.py index 0ba52c40229..6a86c24c6e5 100644 --- a/lib/sqlalchemy/sql/coercions.py +++ b/lib/sqlalchemy/sql/coercions.py @@ -1,5 +1,5 @@ # sql/coercions.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and 
contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 1a71c4a4f94..b975c1cd7f9 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -1,5 +1,5 @@ # sql/compiler.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/crud.py b/lib/sqlalchemy/sql/crud.py index 4f509d9a562..2d0ceb0c182 100644 --- a/lib/sqlalchemy/sql/crud.py +++ b/lib/sqlalchemy/sql/crud.py @@ -1,5 +1,5 @@ # sql/crud.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/ddl.py b/lib/sqlalchemy/sql/ddl.py index 275d38c99fe..343d8f046d4 100644 --- a/lib/sqlalchemy/sql/ddl.py +++ b/lib/sqlalchemy/sql/ddl.py @@ -1,5 +1,5 @@ # sql/ddl.py -# Copyright (C) 2009-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2009-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/default_comparator.py b/lib/sqlalchemy/sql/default_comparator.py index bb446748086..257039459a6 100644 --- a/lib/sqlalchemy/sql/default_comparator.py +++ b/lib/sqlalchemy/sql/default_comparator.py @@ -1,5 +1,5 @@ # sql/default_comparator.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/dml.py b/lib/sqlalchemy/sql/dml.py index ae261830169..eb314dcbf0c 100644 --- a/lib/sqlalchemy/sql/dml.py +++ b/lib/sqlalchemy/sql/dml.py @@ -1,5 
+1,5 @@ # sql/dml.py -# Copyright (C) 2009-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2009-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index 4eac2262853..7671e75d487 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -1,5 +1,5 @@ # sql/elements.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/events.py b/lib/sqlalchemy/sql/events.py index 0e6a9d10320..d650a962933 100644 --- a/lib/sqlalchemy/sql/events.py +++ b/lib/sqlalchemy/sql/events.py @@ -1,5 +1,5 @@ -# sqlalchemy/sql/events.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# sql/events.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/expression.py b/lib/sqlalchemy/sql/expression.py index 3b7f04c3f7c..009f061f633 100644 --- a/lib/sqlalchemy/sql/expression.py +++ b/lib/sqlalchemy/sql/expression.py @@ -1,5 +1,5 @@ # sql/expression.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/functions.py b/lib/sqlalchemy/sql/functions.py index a15d765d7b7..cd22a131eb5 100644 --- a/lib/sqlalchemy/sql/functions.py +++ b/lib/sqlalchemy/sql/functions.py @@ -1,5 +1,5 @@ # sql/functions.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/lambdas.py 
b/lib/sqlalchemy/sql/lambdas.py index b574f83ef92..0c3f24df804 100644 --- a/lib/sqlalchemy/sql/lambdas.py +++ b/lib/sqlalchemy/sql/lambdas.py @@ -1,5 +1,5 @@ # sql/lambdas.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/naming.py b/lib/sqlalchemy/sql/naming.py index 1792f4a46c6..5b2c49e378e 100644 --- a/lib/sqlalchemy/sql/naming.py +++ b/lib/sqlalchemy/sql/naming.py @@ -1,5 +1,5 @@ -# sqlalchemy/naming.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# sql/naming.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/operators.py b/lib/sqlalchemy/sql/operators.py index b6e9e27b8cc..82c8881018e 100644 --- a/lib/sqlalchemy/sql/operators.py +++ b/lib/sqlalchemy/sql/operators.py @@ -1,5 +1,5 @@ # sql/operators.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/roles.py b/lib/sqlalchemy/sql/roles.py index e9412b40ee6..51b69f9b024 100644 --- a/lib/sqlalchemy/sql/roles.py +++ b/lib/sqlalchemy/sql/roles.py @@ -1,5 +1,5 @@ # sql/roles.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py index 2be14bbf201..d1451666b70 100644 --- a/lib/sqlalchemy/sql/schema.py +++ b/lib/sqlalchemy/sql/schema.py @@ -1,5 +1,5 @@ # sql/schema.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module 
is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index 54f80273ed0..b6e96c7b0c3 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -1,5 +1,5 @@ # sql/selectable.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index 36fcabea5a4..94dfd84c781 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ -1,5 +1,5 @@ # sql/sqltypes.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/traversals.py b/lib/sqlalchemy/sql/traversals.py index fd20bbc4cf5..41b960c9c33 100644 --- a/lib/sqlalchemy/sql/traversals.py +++ b/lib/sqlalchemy/sql/traversals.py @@ -1,3 +1,9 @@ +# sql/traversals.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from collections import deque from collections import namedtuple import itertools diff --git a/lib/sqlalchemy/sql/type_api.py b/lib/sqlalchemy/sql/type_api.py index 25ae7eabc23..9af1129cf5b 100644 --- a/lib/sqlalchemy/sql/type_api.py +++ b/lib/sqlalchemy/sql/type_api.py @@ -1,5 +1,5 @@ -# sql/types_api.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# sql/type_api.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/util.py b/lib/sqlalchemy/sql/util.py index d520b0c6f13..f4361c3ecdd 100644 --- a/lib/sqlalchemy/sql/util.py +++ 
b/lib/sqlalchemy/sql/util.py @@ -1,5 +1,5 @@ # sql/util.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/visitors.py b/lib/sqlalchemy/sql/visitors.py index 42307b3bb2a..a3b2a69127b 100644 --- a/lib/sqlalchemy/sql/visitors.py +++ b/lib/sqlalchemy/sql/visitors.py @@ -1,5 +1,5 @@ # sql/visitors.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/__init__.py b/lib/sqlalchemy/testing/__init__.py index 28bc3c5efd2..db6abbd4f7b 100644 --- a/lib/sqlalchemy/testing/__init__.py +++ b/lib/sqlalchemy/testing/__init__.py @@ -1,5 +1,5 @@ # testing/__init__.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/assertions.py b/lib/sqlalchemy/testing/assertions.py index 754f535f564..6825eda204a 100644 --- a/lib/sqlalchemy/testing/assertions.py +++ b/lib/sqlalchemy/testing/assertions.py @@ -1,5 +1,5 @@ # testing/assertions.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/assertsql.py b/lib/sqlalchemy/testing/assertsql.py index e380223a158..dc5523cb87a 100644 --- a/lib/sqlalchemy/testing/assertsql.py +++ b/lib/sqlalchemy/testing/assertsql.py @@ -1,5 +1,5 @@ # testing/assertsql.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released 
under diff --git a/lib/sqlalchemy/testing/asyncio.py b/lib/sqlalchemy/testing/asyncio.py index ccc219f68aa..63b2a10da6d 100644 --- a/lib/sqlalchemy/testing/asyncio.py +++ b/lib/sqlalchemy/testing/asyncio.py @@ -1,5 +1,5 @@ # testing/asyncio.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/config.py b/lib/sqlalchemy/testing/config.py index ab52d233da9..45c789cb250 100644 --- a/lib/sqlalchemy/testing/config.py +++ b/lib/sqlalchemy/testing/config.py @@ -1,5 +1,5 @@ # testing/config.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/engines.py b/lib/sqlalchemy/testing/engines.py index 29d129fecf2..64843f4e163 100644 --- a/lib/sqlalchemy/testing/engines.py +++ b/lib/sqlalchemy/testing/engines.py @@ -1,5 +1,5 @@ # testing/engines.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/entities.py b/lib/sqlalchemy/testing/entities.py index b1c278a5e1a..24b9067db50 100644 --- a/lib/sqlalchemy/testing/entities.py +++ b/lib/sqlalchemy/testing/entities.py @@ -1,5 +1,5 @@ # testing/entities.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/exclusions.py b/lib/sqlalchemy/testing/exclusions.py index 1bdbbbbfcce..f61b4b0ca47 100644 --- a/lib/sqlalchemy/testing/exclusions.py +++ b/lib/sqlalchemy/testing/exclusions.py @@ -1,5 +1,5 @@ # testing/exclusions.py -# 
Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/fixtures.py b/lib/sqlalchemy/testing/fixtures.py index 4d3de1d82a7..ff650c47d12 100644 --- a/lib/sqlalchemy/testing/fixtures.py +++ b/lib/sqlalchemy/testing/fixtures.py @@ -1,5 +1,5 @@ # testing/fixtures.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/mock.py b/lib/sqlalchemy/testing/mock.py index 533d31bb16e..38f90dd4d79 100644 --- a/lib/sqlalchemy/testing/mock.py +++ b/lib/sqlalchemy/testing/mock.py @@ -1,5 +1,5 @@ # testing/mock.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/pickleable.py b/lib/sqlalchemy/testing/pickleable.py index ec788b62321..79f0ee90802 100644 --- a/lib/sqlalchemy/testing/pickleable.py +++ b/lib/sqlalchemy/testing/pickleable.py @@ -1,5 +1,5 @@ # testing/pickleable.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/plugin/__init__.py b/lib/sqlalchemy/testing/plugin/__init__.py index e69de29bb2d..0f987773195 100644 --- a/lib/sqlalchemy/testing/plugin/__init__.py +++ b/lib/sqlalchemy/testing/plugin/__init__.py @@ -0,0 +1,6 @@ +# testing/plugin/__init__.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php diff --git 
a/lib/sqlalchemy/testing/plugin/bootstrap.py b/lib/sqlalchemy/testing/plugin/bootstrap.py index 6721f485fef..cc50912c8cb 100644 --- a/lib/sqlalchemy/testing/plugin/bootstrap.py +++ b/lib/sqlalchemy/testing/plugin/bootstrap.py @@ -1,3 +1,9 @@ +# testing/plugin/bootstrap.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php """ Bootstrapper for test framework plugins. diff --git a/lib/sqlalchemy/testing/plugin/plugin_base.py b/lib/sqlalchemy/testing/plugin/plugin_base.py index e594f3736ae..693dfd4f24d 100644 --- a/lib/sqlalchemy/testing/plugin/plugin_base.py +++ b/lib/sqlalchemy/testing/plugin/plugin_base.py @@ -1,5 +1,5 @@ -# plugin/plugin_base.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# testing/plugin/plugin_base.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/plugin/pytestplugin.py b/lib/sqlalchemy/testing/plugin/pytestplugin.py index 5a51582925d..38b1b8cf3f7 100644 --- a/lib/sqlalchemy/testing/plugin/pytestplugin.py +++ b/lib/sqlalchemy/testing/plugin/pytestplugin.py @@ -1,3 +1,9 @@ +# testing/plugin/pytestplugin.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php try: # installed by bootstrap.py import sqla_plugin_base as plugin_base diff --git a/lib/sqlalchemy/testing/plugin/reinvent_fixtures_py2k.py b/lib/sqlalchemy/testing/plugin/reinvent_fixtures_py2k.py index 36b68417bce..12ed987433d 100644 --- a/lib/sqlalchemy/testing/plugin/reinvent_fixtures_py2k.py +++ b/lib/sqlalchemy/testing/plugin/reinvent_fixtures_py2k.py @@ -1,3 +1,9 @@ +# testing/plugin/reinvent_fixtures_py2k.py +# Copyright (C) 2005-2024 the 
SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php """ invent a quick version of pytest autouse fixtures as pytest's unacceptably slow collection/high memory use in pytest 4.6.11, which is the highest version that diff --git a/lib/sqlalchemy/testing/profiling.py b/lib/sqlalchemy/testing/profiling.py index eba23547f57..7796e1334ab 100644 --- a/lib/sqlalchemy/testing/profiling.py +++ b/lib/sqlalchemy/testing/profiling.py @@ -1,5 +1,5 @@ # testing/profiling.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/provision.py b/lib/sqlalchemy/testing/provision.py index 90c4d93cfc4..56c1be2518a 100644 --- a/lib/sqlalchemy/testing/provision.py +++ b/lib/sqlalchemy/testing/provision.py @@ -1,3 +1,9 @@ +# testing/provision.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php import collections import logging diff --git a/lib/sqlalchemy/testing/requirements.py b/lib/sqlalchemy/testing/requirements.py index 0b2e059d0a1..e1b2d609526 100644 --- a/lib/sqlalchemy/testing/requirements.py +++ b/lib/sqlalchemy/testing/requirements.py @@ -1,5 +1,5 @@ # testing/requirements.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/schema.py b/lib/sqlalchemy/testing/schema.py index a6d098dd5d1..e816e817280 100644 --- a/lib/sqlalchemy/testing/schema.py +++ b/lib/sqlalchemy/testing/schema.py @@ -1,5 +1,5 @@ # testing/schema.py -# Copyright (C) 2005-2023 the 
SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/suite/__init__.py b/lib/sqlalchemy/testing/suite/__init__.py index 30817e1e445..a146cb3163c 100644 --- a/lib/sqlalchemy/testing/suite/__init__.py +++ b/lib/sqlalchemy/testing/suite/__init__.py @@ -1,3 +1,9 @@ +# testing/suite/__init__.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from .test_cte import * # noqa from .test_ddl import * # noqa from .test_deprecations import * # noqa diff --git a/lib/sqlalchemy/testing/suite/test_cte.py b/lib/sqlalchemy/testing/suite/test_cte.py index a94ee55dc03..9a1a2cf75a3 100644 --- a/lib/sqlalchemy/testing/suite/test_cte.py +++ b/lib/sqlalchemy/testing/suite/test_cte.py @@ -1,3 +1,9 @@ +# testing/suite/test_cte.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from .. import fixtures from ..assertions import eq_ from ..schema import Column diff --git a/lib/sqlalchemy/testing/suite/test_ddl.py b/lib/sqlalchemy/testing/suite/test_ddl.py index b3fee551e01..e09064cccde 100644 --- a/lib/sqlalchemy/testing/suite/test_ddl.py +++ b/lib/sqlalchemy/testing/suite/test_ddl.py @@ -1,3 +1,9 @@ +# testing/suite/test_ddl.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php import random from . 
import testing diff --git a/lib/sqlalchemy/testing/suite/test_deprecations.py b/lib/sqlalchemy/testing/suite/test_deprecations.py index b36162fa59b..676d2d1edf3 100644 --- a/lib/sqlalchemy/testing/suite/test_deprecations.py +++ b/lib/sqlalchemy/testing/suite/test_deprecations.py @@ -1,3 +1,9 @@ +# testing/suite/test_deprecations.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from .. import fixtures from ..assertions import eq_ from ..schema import Column diff --git a/lib/sqlalchemy/testing/suite/test_dialect.py b/lib/sqlalchemy/testing/suite/test_dialect.py index c1c0856c325..625d29c39c0 100644 --- a/lib/sqlalchemy/testing/suite/test_dialect.py +++ b/lib/sqlalchemy/testing/suite/test_dialect.py @@ -1,4 +1,10 @@ #! coding: utf-8 +# testing/suite/test_dialect.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from . import testing from .. import assert_raises diff --git a/lib/sqlalchemy/testing/suite/test_insert.py b/lib/sqlalchemy/testing/suite/test_insert.py index 3c22f50b27e..2068e83a2ba 100644 --- a/lib/sqlalchemy/testing/suite/test_insert.py +++ b/lib/sqlalchemy/testing/suite/test_insert.py @@ -1,3 +1,9 @@ +# testing/suite/test_insert.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from .. import config from .. import engines from .. 
import fixtures diff --git a/lib/sqlalchemy/testing/suite/test_reflection.py b/lib/sqlalchemy/testing/suite/test_reflection.py index f1b8d8aaf8a..dbd6d1a6331 100644 --- a/lib/sqlalchemy/testing/suite/test_reflection.py +++ b/lib/sqlalchemy/testing/suite/test_reflection.py @@ -1,3 +1,9 @@ +# testing/suite/test_reflection.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php import operator import re diff --git a/lib/sqlalchemy/testing/suite/test_results.py b/lib/sqlalchemy/testing/suite/test_results.py index c41a55025d6..ede30c6f8b1 100644 --- a/lib/sqlalchemy/testing/suite/test_results.py +++ b/lib/sqlalchemy/testing/suite/test_results.py @@ -1,3 +1,9 @@ +# testing/suite/test_results.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php import datetime from .. 
import engines diff --git a/lib/sqlalchemy/testing/suite/test_rowcount.py b/lib/sqlalchemy/testing/suite/test_rowcount.py index 82e831f4966..ca6995dbe46 100644 --- a/lib/sqlalchemy/testing/suite/test_rowcount.py +++ b/lib/sqlalchemy/testing/suite/test_rowcount.py @@ -1,3 +1,9 @@ +# testing/suite/test_rowcount.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from sqlalchemy import bindparam from sqlalchemy import Column from sqlalchemy import Integer diff --git a/lib/sqlalchemy/testing/suite/test_select.py b/lib/sqlalchemy/testing/suite/test_select.py index cb78fff2e8e..42369a4e0f0 100644 --- a/lib/sqlalchemy/testing/suite/test_select.py +++ b/lib/sqlalchemy/testing/suite/test_select.py @@ -1,3 +1,9 @@ +# testing/suite/test_select.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php import itertools from .. import AssertsCompiledSQL diff --git a/lib/sqlalchemy/testing/suite/test_sequence.py b/lib/sqlalchemy/testing/suite/test_sequence.py index d6747d25386..596dee803e9 100644 --- a/lib/sqlalchemy/testing/suite/test_sequence.py +++ b/lib/sqlalchemy/testing/suite/test_sequence.py @@ -1,3 +1,9 @@ +# testing/suite/test_sequence.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from .. import config from .. 
import fixtures from ..assertions import eq_ diff --git a/lib/sqlalchemy/testing/suite/test_types.py b/lib/sqlalchemy/testing/suite/test_types.py index 6dc50895752..31a63ef3c22 100644 --- a/lib/sqlalchemy/testing/suite/test_types.py +++ b/lib/sqlalchemy/testing/suite/test_types.py @@ -1,3 +1,9 @@ +# testing/suite/test_types.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php # coding: utf-8 import datetime diff --git a/lib/sqlalchemy/testing/suite/test_unicode_ddl.py b/lib/sqlalchemy/testing/suite/test_unicode_ddl.py index a4ae3348ed6..6740772e339 100644 --- a/lib/sqlalchemy/testing/suite/test_unicode_ddl.py +++ b/lib/sqlalchemy/testing/suite/test_unicode_ddl.py @@ -1,3 +1,9 @@ +# testing/suite/test_unicode_ddl.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php # coding: utf-8 """verrrrry basic unicode column name testing""" diff --git a/lib/sqlalchemy/testing/suite/test_update_delete.py b/lib/sqlalchemy/testing/suite/test_update_delete.py index f04a9d57ef0..1604fcd2d14 100644 --- a/lib/sqlalchemy/testing/suite/test_update_delete.py +++ b/lib/sqlalchemy/testing/suite/test_update_delete.py @@ -1,3 +1,9 @@ +# testing/suite/test_update_delete.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php from .. 
import fixtures from ..assertions import eq_ from ..schema import Column diff --git a/lib/sqlalchemy/testing/util.py b/lib/sqlalchemy/testing/util.py index e187de91bf1..8bfe5477e46 100644 --- a/lib/sqlalchemy/testing/util.py +++ b/lib/sqlalchemy/testing/util.py @@ -1,5 +1,5 @@ # testing/util.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/warnings.py b/lib/sqlalchemy/testing/warnings.py index 7969a73ff7b..c4c3ecf00f4 100644 --- a/lib/sqlalchemy/testing/warnings.py +++ b/lib/sqlalchemy/testing/warnings.py @@ -1,5 +1,5 @@ # testing/warnings.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/types.py b/lib/sqlalchemy/types.py index ed62ddd989e..096b3576966 100644 --- a/lib/sqlalchemy/types.py +++ b/lib/sqlalchemy/types.py @@ -1,5 +1,5 @@ # types.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/__init__.py b/lib/sqlalchemy/util/__init__.py index 7a514e9f12e..544f4c06f63 100644 --- a/lib/sqlalchemy/util/__init__.py +++ b/lib/sqlalchemy/util/__init__.py @@ -1,5 +1,5 @@ # util/__init__.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/_collections.py b/lib/sqlalchemy/util/_collections.py index b6ea7fd4f4a..6c2a5aef38a 100644 --- a/lib/sqlalchemy/util/_collections.py +++ b/lib/sqlalchemy/util/_collections.py @@ -1,5 +1,5 @@ # util/_collections.py -# Copyright 
(C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/_compat_py3k.py b/lib/sqlalchemy/util/_compat_py3k.py index 0d31250c9a3..8317112d944 100644 --- a/lib/sqlalchemy/util/_compat_py3k.py +++ b/lib/sqlalchemy/util/_compat_py3k.py @@ -1,5 +1,5 @@ # util/_compat_py3k.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/_concurrency_py3k.py b/lib/sqlalchemy/util/_concurrency_py3k.py index 17f6debb0b3..cc5b1c2faea 100644 --- a/lib/sqlalchemy/util/_concurrency_py3k.py +++ b/lib/sqlalchemy/util/_concurrency_py3k.py @@ -1,5 +1,5 @@ # util/_concurrency_py3k.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/_preloaded.py b/lib/sqlalchemy/util/_preloaded.py index 2a859feeee8..9a962adcb91 100644 --- a/lib/sqlalchemy/util/_preloaded.py +++ b/lib/sqlalchemy/util/_preloaded.py @@ -1,5 +1,5 @@ # util/_preloaded.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/compat.py b/lib/sqlalchemy/util/compat.py index 10a979db16a..81e8dbf475a 100644 --- a/lib/sqlalchemy/util/compat.py +++ b/lib/sqlalchemy/util/compat.py @@ -1,5 +1,5 @@ # util/compat.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/concurrency.py 
b/lib/sqlalchemy/util/concurrency.py index 59a1a747a57..546d82a836a 100644 --- a/lib/sqlalchemy/util/concurrency.py +++ b/lib/sqlalchemy/util/concurrency.py @@ -1,5 +1,5 @@ # util/concurrency.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/deprecations.py b/lib/sqlalchemy/util/deprecations.py index 8243aeb65d1..0a3266eed69 100644 --- a/lib/sqlalchemy/util/deprecations.py +++ b/lib/sqlalchemy/util/deprecations.py @@ -1,5 +1,5 @@ # util/deprecations.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/langhelpers.py b/lib/sqlalchemy/util/langhelpers.py index e4924e4ea06..f3e960c1b7c 100644 --- a/lib/sqlalchemy/util/langhelpers.py +++ b/lib/sqlalchemy/util/langhelpers.py @@ -1,5 +1,5 @@ # util/langhelpers.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/queue.py b/lib/sqlalchemy/util/queue.py index 77646454182..c6f66f2473a 100644 --- a/lib/sqlalchemy/util/queue.py +++ b/lib/sqlalchemy/util/queue.py @@ -1,5 +1,5 @@ # util/queue.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/tool_support.py b/lib/sqlalchemy/util/tool_support.py new file mode 100644 index 00000000000..a203a2ab75a --- /dev/null +++ b/lib/sqlalchemy/util/tool_support.py @@ -0,0 +1,201 @@ +# util/tool_support.py +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# +# +# 
This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +# mypy: allow-untyped-defs, allow-untyped-calls +"""support routines for the helpers in tools/. + +These aren't imported by the enclosing util package as the are not +needed for normal library use. + +""" +from __future__ import annotations + +from argparse import ArgumentParser +from argparse import Namespace +import contextlib +import difflib +import os +from pathlib import Path +import shlex +import shutil +import subprocess +import sys +from typing import Any +from typing import Dict +from typing import Iterator +from typing import Optional +from typing import Union + +from . import compat + + +class code_writer_cmd: + parser: ArgumentParser + args: Namespace + suppress_output: bool + diffs_detected: bool + source_root: Path + pyproject_toml_path: Path + + def __init__(self, tool_script: str): + self.source_root = Path(tool_script).parent.parent + self.pyproject_toml_path = self.source_root / Path("pyproject.toml") + assert self.pyproject_toml_path.exists() + + self.parser = ArgumentParser() + self.parser.add_argument( + "--stdout", + action="store_true", + help="Write to stdout instead of saving to file", + ) + self.parser.add_argument( + "-c", + "--check", + help="Don't write the files back, just return the " + "status. Return code 0 means nothing would change. 
" + "Return code 1 means some files would be reformatted", + action="store_true", + ) + + def run_zimports(self, tempfile: str) -> None: + self._run_console_script( + str(tempfile), + { + "entrypoint": "zimports", + "options": f"--toml-config {self.pyproject_toml_path}", + }, + ) + + def run_black(self, tempfile: str) -> None: + self._run_console_script( + str(tempfile), + { + "entrypoint": "black", + "options": f"--config {self.pyproject_toml_path}", + }, + ) + + def _run_console_script(self, path: str, options: Dict[str, Any]) -> None: + """Run a Python console application from within the process. + + Used for black, zimports + + """ + + is_posix = os.name == "posix" + + entrypoint_name = options["entrypoint"] + + for entry in compat.importlib_metadata_get("console_scripts"): + if entry.name == entrypoint_name: + impl = entry + break + else: + raise Exception( + f"Could not find entrypoint console_scripts.{entrypoint_name}" + ) + cmdline_options_str = options.get("options", "") + cmdline_options_list = shlex.split( + cmdline_options_str, posix=is_posix + ) + [path] + + kw: Dict[str, Any] = {} + if self.suppress_output: + kw["stdout"] = kw["stderr"] = subprocess.DEVNULL + + subprocess.run( + [ + sys.executable, + "-c", + "import %s; %s.%s()" % (impl.module, impl.module, impl.attr), + ] + + cmdline_options_list, + cwd=str(self.source_root), + **kw, + ) + + def write_status(self, *text: str) -> None: + if not self.suppress_output: + sys.stderr.write(" ".join(text)) + + def write_output_file_from_text( + self, text: str, destination_path: Union[str, Path] + ) -> None: + if self.args.check: + self._run_diff(destination_path, source=text) + elif self.args.stdout: + print(text) + else: + self.write_status(f"Writing {destination_path}...") + Path(destination_path).write_text( + text, encoding="utf-8", newline="\n" + ) + self.write_status("done\n") + + def write_output_file_from_tempfile( + self, tempfile: str, destination_path: str + ) -> None: + if self.args.check: + 
self._run_diff(destination_path, source_file=tempfile) + os.unlink(tempfile) + elif self.args.stdout: + with open(tempfile) as tf: + print(tf.read()) + os.unlink(tempfile) + else: + self.write_status(f"Writing {destination_path}...") + shutil.move(tempfile, destination_path) + self.write_status("done\n") + + def _run_diff( + self, + destination_path: Union[str, Path], + *, + source: Optional[str] = None, + source_file: Optional[str] = None, + ) -> None: + if source_file: + with open(source_file, encoding="utf-8") as tf: + source_lines = list(tf) + elif source is not None: + source_lines = source.splitlines(keepends=True) + else: + assert False, "source or source_file is required" + + with open(destination_path, encoding="utf-8") as dp: + d = difflib.unified_diff( + list(dp), + source_lines, + fromfile=Path(destination_path).as_posix(), + tofile="", + n=3, + lineterm="\n", + ) + d_as_list = list(d) + if d_as_list: + self.diffs_detected = True + print("".join(d_as_list)) + + @contextlib.contextmanager + def add_arguments(self) -> Iterator[ArgumentParser]: + yield self.parser + + @contextlib.contextmanager + def run_program(self) -> Iterator[None]: + self.args = self.parser.parse_args() + if self.args.check: + self.diffs_detected = False + self.suppress_output = True + elif self.args.stdout: + self.suppress_output = True + else: + self.suppress_output = False + yield + + if self.args.check and self.diffs_detected: + sys.exit(1) + else: + sys.exit(0) diff --git a/lib/sqlalchemy/util/topological.py b/lib/sqlalchemy/util/topological.py index 2235c2728dd..b6bfc7415e3 100644 --- a/lib/sqlalchemy/util/topological.py +++ b/lib/sqlalchemy/util/topological.py @@ -1,5 +1,5 @@ # util/topological.py -# Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/tools/normalize_file_headers.py b/tools/normalize_file_headers.py new file 
mode 100644 index 00000000000..ba4cd5734f8 --- /dev/null +++ b/tools/normalize_file_headers.py @@ -0,0 +1,69 @@ +from datetime import date +from pathlib import Path +import re + +from sqlalchemy.util.tool_support import code_writer_cmd + +sa_path = Path(__file__).parent.parent / "lib/sqlalchemy" + + +file_re = re.compile(r"^# [\w+/]+.(?:pyx?|pxd)$", re.MULTILINE) +license_re = re.compile( + r"Copyright .C. (\d+)-\d+ the SQLAlchemy authors and contributors" +) + +this_year = date.today().year +license_ = f""" +# Copyright (C) 2005-{this_year} the SQLAlchemy authors and \ +contributors +# +# +# This module is part of SQLAlchemy and is released under +# the MIT License: https://www.opensource.org/licenses/mit-license.php +""" + + +def run_file(cmd: code_writer_cmd, file: Path, update_year: bool): + content = file.read_text("utf-8") + path = str(file.relative_to(sa_path)).replace("\\", "/") # handle windows + path_comment = f"# {path}" + has_license = bool(license_re.search(content)) + if file_re.match(content.strip()): + if has_license: + to_sub = path_comment + else: + to_sub = path_comment + license_ + content = file_re.sub(to_sub, content, count=1) + else: + content = path_comment + ("\n" if has_license else license_) + content + + if has_license and update_year: + content = license_re.sub( + rf"Copyright (C) \1-{this_year} the SQLAlchemy " + "authors and contributors", + content, + 1, + ) + cmd.write_output_file_from_text(content, file) + + +def run(cmd: code_writer_cmd, update_year: bool): + i = 0 + for ext in ("py", "pyx", "pxd"): + for file in sa_path.glob(f"**/*.{ext}"): + run_file(cmd, file, update_year) + i += 1 + cmd.write_status(f"\nDone. 
Processed {i} files.") + + +if __name__ == "__main__": + cmd = code_writer_cmd(__file__) + with cmd.add_arguments() as parser: + parser.add_argument( + "--update-year", + action="store_true", + help="Update the year in the license files", + ) + + with cmd.run_program(): + run(cmd, cmd.args.update_year) From 6eca5a1a59858763fc7386c8d75a86968587d02c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 1 Jan 2024 16:54:58 -0500 Subject: [PATCH 573/632] ensure correct lock type propagated in pool recreate Fixed critical issue in asyncio version of the connection pool where calling :meth:`_asyncio.AsyncEngine.dispose` would produce a new connection pool that did not fully re-establish the use of asyncio-compatible mutexes, leading to the use of a plain ``threading.Lock()`` which would then cause deadlocks in an asyncio context when using concurrency features like ``asyncio.gather()``. Fixes: #10813 Change-Id: I95ec698b6a1ba79555aa0b28e6bce65fedf3b1fe (cherry picked from commit 2ed32bbf891b8f7e6c151071b4711319d9aa84f0) (cherry picked from commit c65e4f4471cd10051476caaadcc92d7a7eb557b4) --- doc/build/changelog/unreleased_14/10813.rst | 11 +++++++++++ lib/sqlalchemy/event/attr.py | 22 ++++++++++++++++++--- test/ext/asyncio/test_engine_py3k.py | 20 +++++++++++++++++++ 3 files changed, 50 insertions(+), 3 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/10813.rst diff --git a/doc/build/changelog/unreleased_14/10813.rst b/doc/build/changelog/unreleased_14/10813.rst new file mode 100644 index 00000000000..d4f72d8e0b2 --- /dev/null +++ b/doc/build/changelog/unreleased_14/10813.rst @@ -0,0 +1,11 @@ +.. 
change:: + :tags: bug, asyncio + :tickets: 10813 + :versions: 1.4.51, 2.0.25 + + Fixed critical issue in asyncio version of the connection pool where + calling :meth:`_asyncio.AsyncEngine.dispose` would produce a new connection + pool that did not fully re-establish the use of asyncio-compatible mutexes, + leading to the use of a plain ``threading.Lock()`` which would then cause + deadlocks in an asyncio context when using concurrency features like + ``asyncio.gather()``. diff --git a/lib/sqlalchemy/event/attr.py b/lib/sqlalchemy/event/attr.py index 5a85cb91ee2..7ed9d0ed90c 100644 --- a/lib/sqlalchemy/event/attr.py +++ b/lib/sqlalchemy/event/attr.py @@ -259,13 +259,25 @@ def __bool__(self): class _CompoundListener(_InstanceLevelDispatch): - __slots__ = "_exec_once_mutex", "_exec_once", "_exec_w_sync_once" + __slots__ = ( + "_exec_once_mutex", + "_exec_once", + "_exec_w_sync_once", + "_is_asyncio", + ) + + def __init__(self, *arg, **kw): + super(_CompoundListener, self).__init__(*arg, **kw) + self._is_asyncio = False def _set_asyncio(self): - self._exec_once_mutex = AsyncAdaptedLock() + self._is_asyncio = True def _memoized_attr__exec_once_mutex(self): - return threading.Lock() + if self._is_asyncio: + return AsyncAdaptedLock() + else: + return threading.Lock() def _exec_once_impl(self, retry_on_exception, *args, **kw): with self._exec_once_mutex: @@ -365,6 +377,7 @@ class _ListenerCollection(_CompoundListener): ) def __init__(self, parent, target_cls): + super(_ListenerCollection, self).__init__() if target_cls not in parent._clslevel: parent.update_subclass(target_cls) self._exec_once = False @@ -401,6 +414,9 @@ def _update(self, other, only_propagate=True): existing_listeners.extend(other_listeners) + if other._is_asyncio: + self._set_asyncio() + to_associate = other.propagate.union(other_listeners) registry._stored_in_collection_multi(self, other, to_associate) diff --git a/test/ext/asyncio/test_engine_py3k.py b/test/ext/asyncio/test_engine_py3k.py index 
7875b9aec4c..cb79fa3826c 100644 --- a/test/ext/asyncio/test_engine_py3k.py +++ b/test/ext/asyncio/test_engine_py3k.py @@ -1169,3 +1169,23 @@ def test_regen_trans_but_not_conn(self, connection_no_trans): async_t2 = async_conn.get_transaction() is_(async_t1, async_t2) + + +class PoolRegenTest(EngineFixture): + @testing.requires.queue_pool + @async_test + @testing.variation("do_dispose", [True, False]) + async def test_gather_after_dispose(self, testing_engine, do_dispose): + engine = testing_engine( + asyncio=True, options=dict(pool_size=10, max_overflow=10) + ) + + async def thing(engine): + async with engine.connect() as conn: + await conn.exec_driver_sql("select 1") + + if do_dispose: + await engine.dispose() + + tasks = [thing(engine) for _ in range(10)] + await asyncio.gather(*tasks) From 2a7f478e2a81bfe6277c872633b578c3ecd6108b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 2 Jan 2024 20:03:35 -0500 Subject: [PATCH 574/632] fix big scary setuptools warning about license_files Change-Id: Ic84b769be748d410ed89539ffd798ea91d00af54 --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 307087dc0be..852089a99bb 100644 --- a/setup.cfg +++ b/setup.cfg @@ -11,7 +11,7 @@ url = https://www.sqlalchemy.org author = Mike Bayer author_email = mike_mp@zzzcomputing.com license = MIT -license_file = LICENSE +license_files = LICENSE classifiers = Development Status :: 5 - Production/Stable Intended Audience :: Developers From 16925f96564ab85328f654ac8c895fb44e227cfb Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 2 Jan 2024 20:07:42 -0500 Subject: [PATCH 575/632] happy new year, continued Change-Id: I9cbd176d73c29c7975ead644367560a24f267de7 --- LICENSE | 2 +- doc/build/conf.py | 2 +- doc/build/copyright.rst | 2 +- lib/sqlalchemy/cextension/immutabledict.c | 2 +- lib/sqlalchemy/cextension/processors.c | 2 +- lib/sqlalchemy/cextension/resultproxy.c | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git 
a/LICENSE b/LICENSE index 7bf9bbe9683..967cdc5dc10 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright 2005-2023 SQLAlchemy authors and contributors . +Copyright 2005-2024 SQLAlchemy authors and contributors . Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/doc/build/conf.py b/doc/build/conf.py index 0066ef7aad8..4c2f114dfbc 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -212,7 +212,7 @@ # General information about the project. project = u"SQLAlchemy" -copyright = u"2007-2023, the SQLAlchemy authors and contributors" # noqa +copyright = u"2007-2024, the SQLAlchemy authors and contributors" # noqa # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/doc/build/copyright.rst b/doc/build/copyright.rst index aa4abac9b1d..b3a67ccf469 100644 --- a/doc/build/copyright.rst +++ b/doc/build/copyright.rst @@ -6,7 +6,7 @@ Appendix: Copyright This is the MIT license: ``_ -Copyright (c) 2005-2023 Michael Bayer and contributors. +Copyright (c) 2005-2024 Michael Bayer and contributors. SQLAlchemy is a trademark of Michael Bayer. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this diff --git a/lib/sqlalchemy/cextension/immutabledict.c b/lib/sqlalchemy/cextension/immutabledict.c index 2dbc7381de7..2d6bd962257 100644 --- a/lib/sqlalchemy/cextension/immutabledict.c +++ b/lib/sqlalchemy/cextension/immutabledict.c @@ -1,6 +1,6 @@ /* immuatbledict.c -Copyright (C) 2005-2023 the SQLAlchemy authors and contributors +Copyright (C) 2005-2024 the SQLAlchemy authors and contributors This module is part of SQLAlchemy and is released under the MIT License: https://www.opensource.org/licenses/mit-license.php diff --git a/lib/sqlalchemy/cextension/processors.c b/lib/sqlalchemy/cextension/processors.c index d6e7c4fe4e0..12ed79e96ac 100644 --- a/lib/sqlalchemy/cextension/processors.c +++ b/lib/sqlalchemy/cextension/processors.c @@ -1,6 +1,6 @@ /* processors.c -Copyright (C) 2010-2023 the SQLAlchemy authors and contributors +Copyright (C) 2010-2024 the SQLAlchemy authors and contributors Copyright (C) 2010-2011 Gaetan de Menten gdementen@gmail.com This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/cextension/resultproxy.c b/lib/sqlalchemy/cextension/resultproxy.c index 00eddc4475b..8e8b6f9e4fe 100644 --- a/lib/sqlalchemy/cextension/resultproxy.c +++ b/lib/sqlalchemy/cextension/resultproxy.c @@ -1,6 +1,6 @@ /* resultproxy.c -Copyright (C) 2010-2023 the SQLAlchemy authors and contributors +Copyright (C) 2010-2024 the SQLAlchemy authors and contributors Copyright (C) 2010-2011 Gaetan de Menten gdementen@gmail.com This module is part of SQLAlchemy and is released under From 2ed9b687a4da84bee0191e733a47e39f0a8f91f9 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 2 Jan 2024 20:08:42 -0500 Subject: [PATCH 576/632] - 1.4.51 --- doc/build/changelog/changelog_14.rst | 38 ++++++++++++++++++++- doc/build/changelog/unreleased_14/10650.rst | 7 ---- doc/build/changelog/unreleased_14/10782.rst | 15 -------- doc/build/changelog/unreleased_14/10813.rst | 11 
------ doc/build/conf.py | 4 +-- 5 files changed, 39 insertions(+), 36 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/10650.rst delete mode 100644 doc/build/changelog/unreleased_14/10782.rst delete mode 100644 doc/build/changelog/unreleased_14/10813.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 00a709e4497..97fc250db3a 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,43 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.51 - :include_notes_from: unreleased_14 + :released: January 2, 2024 + + .. change:: + :tags: bug, mysql + :tickets: 10650 + :versions: 2.0.24 + + Fixed regression introduced by the fix in ticket :ticket:`10492` when using + pool pre-ping with PyMySQL version older than 1.0. + + .. change:: + :tags: bug, orm + :tickets: 10782 + :versions: 2.0.24, 1.4.51 + + Improved a fix first implemented for :ticket:`3208` released in version + 0.9.8, where the registry of classes used internally by declarative could + be subject to a race condition in the case where individual mapped classes + are being garbage collected at the same time while new mapped classes are + being constructed, as can happen in some test suite configurations or + dynamic class creation environments. In addition to the weakref check + already added, the list of items being iterated is also copied first to + avoid "list changed while iterating" errors. Pull request courtesy Yilei + Yang. + + + .. 
change:: + :tags: bug, asyncio + :tickets: 10813 + :versions: 1.4.51, 2.0.25 + + Fixed critical issue in asyncio version of the connection pool where + calling :meth:`_asyncio.AsyncEngine.dispose` would produce a new connection + pool that did not fully re-establish the use of asyncio-compatible mutexes, + leading to the use of a plain ``threading.Lock()`` which would then cause + deadlocks in an asyncio context when using concurrency features like + ``asyncio.gather()``. .. changelog:: :version: 1.4.50 diff --git a/doc/build/changelog/unreleased_14/10650.rst b/doc/build/changelog/unreleased_14/10650.rst deleted file mode 100644 index dce6b4c75a5..00000000000 --- a/doc/build/changelog/unreleased_14/10650.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: bug, mysql - :tickets: 10650 - :versions: 2.0.24 - - Fixed regression introduced by the fix in ticket :ticket:`10492` when using - pool pre-ping with PyMySQL version older than 1.0. diff --git a/doc/build/changelog/unreleased_14/10782.rst b/doc/build/changelog/unreleased_14/10782.rst deleted file mode 100644 index d7b219a3652..00000000000 --- a/doc/build/changelog/unreleased_14/10782.rst +++ /dev/null @@ -1,15 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 10782 - :versions: 2.0.24, 1.4.51 - - Improved a fix first implemented for :ticket:`3208` released in version - 0.9.8, where the registry of classes used internally by declarative could - be subject to a race condition in the case where individual mapped classes - are being garbage collected at the same time while new mapped classes are - being constructed, as can happen in some test suite configurations or - dynamic class creation environments. In addition to the weakref check - already added, the list of items being iterated is also copied first to - avoid "list changed while iterating" errors. Pull request courtesy Yilei - Yang. 
- diff --git a/doc/build/changelog/unreleased_14/10813.rst b/doc/build/changelog/unreleased_14/10813.rst deleted file mode 100644 index d4f72d8e0b2..00000000000 --- a/doc/build/changelog/unreleased_14/10813.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. change:: - :tags: bug, asyncio - :tickets: 10813 - :versions: 1.4.51, 2.0.25 - - Fixed critical issue in asyncio version of the connection pool where - calling :meth:`_asyncio.AsyncEngine.dispose` would produce a new connection - pool that did not fully re-establish the use of asyncio-compatible mutexes, - leading to the use of a plain ``threading.Lock()`` which would then cause - deadlocks in an asyncio context when using concurrency features like - ``asyncio.gather()``. diff --git a/doc/build/conf.py b/doc/build/conf.py index 4c2f114dfbc..587ddb12da5 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -221,9 +221,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. -release = "1.4.50" +release = "1.4.51" -release_date = "October 29, 2023" +release_date = "January 2, 2024" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From 3743a47fe0c7b3f2e485690a0a1835abf10b0a68 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 2 Jan 2024 20:32:38 -0500 Subject: [PATCH 577/632] Version 1.4.52 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 97fc250db3a..78b186863f0 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.52 + :include_notes_from: unreleased_14 + .. 
changelog:: :version: 1.4.51 :released: January 2, 2024 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index 063ac840eb2..4b03001fba4 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.51" +__version__ = "1.4.52" def __go(lcls): From 0653c1ca11f4f5ac8ee17144d78afe71740a88e8 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Thu, 11 Jan 2024 19:57:25 +0100 Subject: [PATCH 578/632] fix wheel on windows-mac Change-Id: Id759f1f533cff5691332441455ff3f23fbabbe55 (cherry picked from commit a9416a9e73331dc330e30343f26631b29108373b) (cherry picked from commit a8cdc8f53816d3156e1f8b4032c3375b91dba825) --- .github/workflows/create-wheels.yaml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/create-wheels.yaml b/.github/workflows/create-wheels.yaml index 101f6af162b..959a7c66ab0 100644 --- a/.github/workflows/create-wheels.yaml +++ b/.github/workflows/create-wheels.yaml @@ -34,10 +34,6 @@ jobs: - x64 - x86 - include: - - python-version: "2.7" - extra-requires: "mock" - exclude: - os: "macos-latest" architecture: x86 From 4959dea070d359a95e80929f96c0a5b81702ed13 Mon Sep 17 00:00:00 2001 From: Zhong Zheng Date: Fri, 16 Feb 2024 12:20:59 -0500 Subject: [PATCH 579/632] Fix mysql dialect text docstring, length is interpreted as byte size ### Description The `Text` and its variant types in MySQL are bytes size limited, not character length, so fixing the doctoring where the upper limit uses the `characters` as the unit instead of `bytes` https://dev.mysql.com/doc/refman/5.7/en/storage-requirements.html https://dev.mysql.com/doc/refman/8.0/en/storage-requirements.html Screenshot 2024-02-15 at 17 27 59 ### Checklist This pull request is: - [x] A documentation / typographical / small typing error fix - Good to go, no issue or tests are needed - [ ] A short code fix - please include the issue number, and create an issue if none exists, which must 
include a complete example of the issue. one line code fixes without an issue and demonstration will not be accepted. - Please include: `Fixes: #` in the commit message - please include tests. one line code fixes without tests will not be accepted. - [ ] A new feature implementation - please include the issue number, and create an issue if none exists, which must include a complete example of how the feature would look. - Please include: `Fixes: #` in the commit message - please include tests. **Have a nice day!** Closes: #11018 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/11018 Pull-request-sha: 13fa52917efea9a229c7abf19a3be40e24a79cb9 Change-Id: Iea903a6dc4b52ee4b7b5d2d64256c69abbd1f8aa (cherry picked from commit 1c58fe53b6fd069cbb82955ddaf9eb5405076146) (cherry picked from commit 51011db22b0d51b5560d55b97671631cadc10265) --- lib/sqlalchemy/dialects/mysql/types.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/lib/sqlalchemy/dialects/mysql/types.py b/lib/sqlalchemy/dialects/mysql/types.py index a8a6042f897..a7996189b76 100644 --- a/lib/sqlalchemy/dialects/mysql/types.py +++ b/lib/sqlalchemy/dialects/mysql/types.py @@ -499,7 +499,7 @@ def __init__(self, display_width=None): class TEXT(_StringType, sqltypes.TEXT): - """MySQL TEXT type, for text up to 2^16 characters.""" + """MySQL TEXT type, for character storage encoded up to 2^16 bytes.""" __visit_name__ = "TEXT" @@ -508,7 +508,7 @@ def __init__(self, length=None, **kw): :param length: Optional, if provided the server may optimize storage by substituting the smallest TEXT type sufficient to store - ``length`` characters. + ``length`` bytes of characters. :param charset: Optional, a column-level character set for this string value. Takes precedence to 'ascii' or 'unicode' short-hand. 
@@ -535,7 +535,7 @@ def __init__(self, length=None, **kw): class TINYTEXT(_StringType): - """MySQL TINYTEXT type, for text up to 2^8 characters.""" + """MySQL TINYTEXT type, for character storage encoded up to 2^8 bytes.""" __visit_name__ = "TINYTEXT" @@ -567,7 +567,8 @@ def __init__(self, **kwargs): class MEDIUMTEXT(_StringType): - """MySQL MEDIUMTEXT type, for text up to 2^24 characters.""" + """MySQL MEDIUMTEXT type, for character storage encoded up + to 2^24 bytes.""" __visit_name__ = "MEDIUMTEXT" @@ -599,7 +600,7 @@ def __init__(self, **kwargs): class LONGTEXT(_StringType): - """MySQL LONGTEXT type, for text up to 2^32 characters.""" + """MySQL LONGTEXT type, for character storage encoded up to 2^32 bytes.""" __visit_name__ = "LONGTEXT" @@ -683,7 +684,7 @@ def __init__(self, length=None, **kwargs): super(CHAR, self).__init__(length=length, **kwargs) @classmethod - def _adapt_string_for_cast(self, type_): + def _adapt_string_for_cast(cls, type_): # copy the given string type into a CHAR # for the purposes of rendering a CAST expression type_ = sqltypes.to_instance(type_) From 0f3e8d08d6124440bc37f9799adb8caad71e63f1 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 19 Sep 2023 08:58:52 -0400 Subject: [PATCH 580/632] Ensure loader criteria used for ORM join with expression condition Fixed bug where ORM :func:`_orm.with_loader_criteria` would not apply itself to a :meth:`_sql.Select.join` where the ON clause were given as a plain SQL comparison, rather than as a relationship target or similar. 
Fixes: #10365 Change-Id: Ie6d08fb01a3079b7c3ccd3a8241031d46a56e19d (cherry picked from commit f8086a809bba358790cff032d745814b186ab8cb) --- doc/build/changelog/unreleased_14/10365.rst | 9 +++++ lib/sqlalchemy/orm/util.py | 37 ++++++++++----------- setup.cfg | 2 ++ test/orm/test_relationship_criteria.py | 23 +++++++++++++ 4 files changed, 52 insertions(+), 19 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/10365.rst diff --git a/doc/build/changelog/unreleased_14/10365.rst b/doc/build/changelog/unreleased_14/10365.rst new file mode 100644 index 00000000000..5eb4f440657 --- /dev/null +++ b/doc/build/changelog/unreleased_14/10365.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, orm + :tickets: 10365 + + Fixed bug where ORM :func:`_orm.with_loader_criteria` would not apply + itself to a :meth:`_sql.Select.join` where the ON clause were given as a + plain SQL comparison, rather than as a relationship target or similar. + + This is a backport of the same issue fixed in version 2.0 for 2.0.22. diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index a296fc0c17d..28bf5b76c8d 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -1737,9 +1737,8 @@ def __init__( else: prop = None + left_selectable = left_info.selectable if prop: - left_selectable = left_info.selectable - if sql_util.clause_is_present(on_selectable, left_selectable): adapt_from = on_selectable else: @@ -1774,25 +1773,25 @@ def __init__( self._target_adapter = target_adapter - # we don't use the normal coercions logic for _ORMJoin - # (probably should), so do some gymnastics to get the entity. - # logic here is for #8721, which was a major bug in 1.4 - # for almost two years, not reported/fixed until 1.4.43 (!) 
- if left_info.is_selectable: - parententity = left_selectable._annotations.get( - "parententity", None - ) - elif left_info.is_mapper or left_info.is_aliased_class: - parententity = left_info - else: - parententity = None + # we don't use the normal coercions logic for _ORMJoin + # (probably should), so do some gymnastics to get the entity. + # logic here is for #8721, which was a major bug in 1.4 + # for almost two years, not reported/fixed until 1.4.43 (!) + if left_info.is_selectable: + parententity = left_selectable._annotations.get( + "parententity", None + ) + elif left_info.is_mapper or left_info.is_aliased_class: + parententity = left_info + else: + parententity = None - if parententity is not None: - self._annotations = self._annotations.union( - {"parententity": parententity} - ) + if parententity is not None: + self._annotations = self._annotations.union( + {"parententity": parententity} + ) - augment_onclause = onclause is None and _extra_criteria + augment_onclause = bool(_extra_criteria) and not prop expression.Join.__init__(self, left, right, onclause, isouter, full) if augment_onclause: diff --git a/setup.cfg b/setup.cfg index 852089a99bb..c11b40cbbf2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -114,6 +114,8 @@ ignore = N801,N802,N806, RST304,RST303,RST299,RST399, W503,W504 + U100 + IS001 exclude = .venv,.git,.tox,dist,doc,*egg,build import-order-style = google application-import-names = sqlalchemy,test diff --git a/test/orm/test_relationship_criteria.py b/test/orm/test_relationship_criteria.py index d93f1fc8f30..82ad752c44d 100644 --- a/test/orm/test_relationship_criteria.py +++ b/test/orm/test_relationship_criteria.py @@ -306,6 +306,13 @@ def test_criteria_post_replace(self, user_address_fixture): .join(User.addresses) .options(with_loader_criteria(User, User.name != "name")), ), + ( + # issue #10365 + lambda User, Address: select(Address) + .select_from(User) + .join(Address, User.id == Address.user_id) + .options(with_loader_criteria(User, 
User.name != "name")), + ), ( lambda User, Address: select(Address) .select_from(orm_join(User, Address, User.addresses)) @@ -354,6 +361,13 @@ def test_criteria_select_from_w_join_left( .join(User.addresses) .options(with_loader_criteria(User, User.name != "name")), ), + ( + # issue #10365 - this seems to have already worked + lambda User, Address: select(Address.id, User.id) + .select_from(User) + .join(Address, User.id == Address.user_id) + .options(with_loader_criteria(User, User.name != "name")), + ), ( lambda User, Address: select(Address.id, User.id) .select_from(orm_join(User, Address, User.addresses)) @@ -403,6 +417,15 @@ def test_criteria_select_from_w_join_left_including_entity( with_loader_criteria(Address, Address.email_address != "email") ), ), + ( + # issue #10365 + lambda User, Address: select(Address) + .select_from(User) + .join(Address, User.id == Address.user_id) + .options( + with_loader_criteria(Address, Address.email_address != "email") + ), + ), ( # for orm_join(), this is set up before we have the context # available that allows with_loader_criteria to be set up From 768507602e4564108799c0c6bfd3d7ceb734784b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 4 Mar 2024 08:24:31 -0500 Subject: [PATCH 581/632] - 1.4.52 --- doc/build/changelog/changelog_14.rst | 12 +++++++++++- doc/build/changelog/unreleased_14/10365.rst | 9 --------- doc/build/conf.py | 4 ++-- 3 files changed, 13 insertions(+), 12 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/10365.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 78b186863f0..3ee8b6d2df3 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,17 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.52 - :include_notes_from: unreleased_14 + :released: March 4, 2024 + + .. 
change:: + :tags: bug, orm + :tickets: 10365 + + Fixed bug where ORM :func:`_orm.with_loader_criteria` would not apply + itself to a :meth:`_sql.Select.join` where the ON clause were given as a + plain SQL comparison, rather than as a relationship target or similar. + + This is a backport of the same issue fixed in version 2.0 for 2.0.22. .. changelog:: :version: 1.4.51 diff --git a/doc/build/changelog/unreleased_14/10365.rst b/doc/build/changelog/unreleased_14/10365.rst deleted file mode 100644 index 5eb4f440657..00000000000 --- a/doc/build/changelog/unreleased_14/10365.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, orm - :tickets: 10365 - - Fixed bug where ORM :func:`_orm.with_loader_criteria` would not apply - itself to a :meth:`_sql.Select.join` where the ON clause were given as a - plain SQL comparison, rather than as a relationship target or similar. - - This is a backport of the same issue fixed in version 2.0 for 2.0.22. diff --git a/doc/build/conf.py b/doc/build/conf.py index 587ddb12da5..508116a5608 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -221,9 +221,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. 
-release = "1.4.51" +release = "1.4.52" -release_date = "January 2, 2024" +release_date = "March 4, 2024" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From 79d1beac59fc5d9100836864ac89783f42f41801 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 4 Mar 2024 08:30:04 -0500 Subject: [PATCH 582/632] Version 1.4.53 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 3ee8b6d2df3..7ed34ad2ddf 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.53 + :include_notes_from: unreleased_14 + .. changelog:: :version: 1.4.52 :released: March 4, 2024 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index 4b03001fba4..a1433d2f098 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.52" +__version__ = "1.4.53" def __go(lcls): From 68d4251e9fa2a1c1e4cba14ae828ee2ca8c63428 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 18 Mar 2024 10:22:06 -0400 Subject: [PATCH 583/632] add missing cache_ok directive to MyEpochType Change-Id: Ic4da52b02a4ba36d87d73974fe428b91d9d7915c (cherry picked from commit 4ef36de359449abd49b90726a1d06aef9a4084e7) --- lib/sqlalchemy/sql/type_api.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/sqlalchemy/sql/type_api.py b/lib/sqlalchemy/sql/type_api.py index 9af1129cf5b..badadcec60b 100644 --- a/lib/sqlalchemy/sql/type_api.py +++ b/lib/sqlalchemy/sql/type_api.py @@ -1244,6 +1244,8 @@ class produces the same behavior each time, it may be set to ``True``. 
class MyEpochType(types.TypeDecorator): impl = types.Integer + cache_ok = True + epoch = datetime.date(1970, 1, 1) def process_bind_param(self, value, dialect): From d7ea47ddff0500f802bf6b720f756bce1cd19a30 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 19 Mar 2024 08:35:00 -0400 Subject: [PATCH 584/632] add notes clarifying the role of "$user" in pg search_path references: https://github.com/sqlalchemy/alembic/discussions/1447 Change-Id: I2ef55813699f84ac7fbca6de7522f0d3d78e6029 (cherry picked from commit 58a50c06836792da201bb610ee2f0463ac1bb073) --- lib/sqlalchemy/dialects/postgresql/base.py | 68 ++++++++++++++++++---- 1 file changed, 57 insertions(+), 11 deletions(-) diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index 6b3af4bdae6..c36597bbc40 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -341,7 +341,9 @@ def set_search_path(dbapi_connection, connection_record): .. admonition:: Section Best Practices Summarized keep the ``search_path`` variable set to its default of ``public``, without - any other schema names. For other schema names, name these explicitly + any other schema names. Ensure the username used to connect **does not** + match remote schemas, or ensure the ``"$user"`` token is **removed** from + ``search_path``. For other schema names, name these explicitly within :class:`_schema.Table` definitions. Alternatively, the ``postgresql_ignore_search_path`` option will cause all reflected :class:`_schema.Table` objects to have a :attr:`_schema.Table.schema` @@ -350,12 +352,63 @@ def set_search_path(dbapi_connection, connection_record): The PostgreSQL dialect can reflect tables from any schema, as outlined in :ref:`metadata_reflection_schemas`. +In all cases, the first thing SQLAlchemy does when reflecting tables is +to **determine the default schema for the current database connection**. 
+It does this using the PostgreSQL ``current_schema()`` +function, illustrated below using a PostgreSQL client session (i.e. using +the ``psql`` tool):: + + test=> select current_schema(); + current_schema + ---------------- + public + (1 row) + +Above we see that on a plain install of PostgreSQL, the default schema name +is the name ``public``. + +However, if your database username **matches the name of a schema**, PostgreSQL's +default is to then **use that name as the default schema**. Below, we log in +using the username ``scott``. When we create a schema named ``scott``, **it +implicitly changes the default schema**:: + + test=> select current_schema(); + current_schema + ---------------- + public + (1 row) + + test=> create schema scott; + CREATE SCHEMA + test=> select current_schema(); + current_schema + ---------------- + scott + (1 row) + +The behavior of ``current_schema()`` is derived from the +`PostgreSQL search path +`_ +variable ``search_path``, which in modern PostgreSQL versions defaults to this:: + + test=> show search_path; + search_path + ----------------- + "$user", public + (1 row) + +Where above, the ``"$user"`` variable will inject the current username as the +default schema, if one exists. Otherwise, ``public`` is used. + +When a :class:`_schema.Table` object is reflected, if it is present in the +schema indicated by the ``current_schema()`` function, **the schema name assigned +to the table is the Python value ``None``**. Otherwise, the schema name +will be assigned as the name of that schema. + With regards to tables which these :class:`_schema.Table` objects refer to via foreign key constraint, a decision must be made as to how the ``.schema`` is represented in those remote tables, in the case where that -remote schema name is also a member of the current -`PostgreSQL search path -`_. +remote schema name is also a member of the current ``search_path``. 
By default, the PostgreSQL dialect mimics the behavior encouraged by PostgreSQL's own ``pg_get_constraintdef()`` builtin procedure. This function @@ -461,13 +514,6 @@ def set_search_path(dbapi_connection, connection_record): described here are only for those users who can't, or prefer not to, stay within these guidelines. -Note that **in all cases**, the "default" schema is always reflected as -``None``. The "default" schema on PostgreSQL is that which is returned by the -PostgreSQL ``current_schema()`` function. On a typical PostgreSQL -installation, this is the name ``public``. So a table that refers to another -which is in the ``public`` (i.e. default) schema will always have the -``.schema`` attribute set to ``None``. - .. seealso:: :ref:`reflection_schema_qualified_interaction` - discussion of the issue From 01354d8fa93db87dd684f542d3a68d5ce50c127c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 19 Mar 2024 10:51:01 -0400 Subject: [PATCH 585/632] work around boldface concerns Change-Id: I99ed117bb0f1bdc1a8750bd13db5a69d5c398ae0 (cherry picked from commit 697dcc94e412e013aba298e17613ee097f423e04) --- lib/sqlalchemy/dialects/postgresql/base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index c36597bbc40..652a6956704 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -402,8 +402,8 @@ def set_search_path(dbapi_connection, connection_record): When a :class:`_schema.Table` object is reflected, if it is present in the schema indicated by the ``current_schema()`` function, **the schema name assigned -to the table is the Python value ``None``**. Otherwise, the schema name -will be assigned as the name of that schema. +to the ".schema" attribute of the Table is the Python "None" value**. Otherwise, the +".schema" attribute will be assigned the string name of that schema. 
With regards to tables which these :class:`_schema.Table` objects refer to via foreign key constraint, a decision must be made as to how From e61e631ab2114c753e14d84c0ac1eee3dddf1cd5 Mon Sep 17 00:00:00 2001 From: Stefan Wojcik <5014112+yawhide@users.noreply.github.com> Date: Mon, 8 Apr 2024 15:23:19 -0400 Subject: [PATCH 586/632] Update links from initd.org to psycopg.org (#11244) (cherry picked from commit ac7d70dea89dfaf8e061bc8dd03a1ed7825069fc) --- doc/build/changelog/migration_12.rst | 2 +- lib/sqlalchemy/dialects/postgresql/psycopg2.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/build/changelog/migration_12.rst b/doc/build/changelog/migration_12.rst index d5676e2854d..e0fb0e41408 100644 --- a/doc/build/changelog/migration_12.rst +++ b/doc/build/changelog/migration_12.rst @@ -1557,7 +1557,7 @@ Support for Batch Mode / Fast Execution Helpers The psycopg2 ``cursor.executemany()`` method has been identified as performing poorly, particularly with INSERT statements. To alleviate this, psycopg2 -has added `Fast Execution Helpers `_ +has added `Fast Execution Helpers `_ which rework statements into fewer server round trips by sending multiple DML statements in batch. SQLAlchemy 1.2 now includes support for these helpers to be used transparently whenever the :class:`_engine.Engine` makes use diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py index 80033d0d7ed..c2bd530ecd7 100644 --- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py +++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py @@ -231,7 +231,7 @@ Modern versions of psycopg2 include a feature known as `Fast Execution Helpers \ -`_, which +`_, which have been shown in benchmarking to improve psycopg2's executemany() performance, primarily with INSERT statements, by multiple orders of magnitude. 
SQLAlchemy internally makes use of these extensions for ``executemany()`` style From 6a42b827766b00bfe56c6b163905fff0c1e8f140 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 24 May 2024 10:58:02 -0400 Subject: [PATCH 587/632] Add test for issue 11412 Issue #10365 was found to also fix an issue where single-inheritance criteria would not be correctly applied to a subclass entity that only appeared in the ``select_from()`` list. Fixes: #11412 Change-Id: Ic865737a3d075fceee346eea8044345233038f72 (cherry picked from commit 61d227a7d4f7be7b1f6fa72171d01c60e571939e) (cherry picked from commit a0a52e79eec780206bc014f301d301f345ec57a0) --- doc/build/changelog/changelog_14.rst | 7 +++- setup.cfg | 2 +- test/orm/inheritance/test_single.py | 52 ++++++++++++++++++++++++++++ 3 files changed, 59 insertions(+), 2 deletions(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 7ed34ad2ddf..01572e55c83 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -23,7 +23,7 @@ This document details individual issue-level changes made throughout .. change:: :tags: bug, orm - :tickets: 10365 + :tickets: 10365, 11412 Fixed bug where ORM :func:`_orm.with_loader_criteria` would not apply itself to a :meth:`_sql.Select.join` where the ON clause were given as a @@ -31,6 +31,11 @@ This document details individual issue-level changes made throughout This is a backport of the same issue fixed in version 2.0 for 2.0.22. + **update** - this was found to also fix an issue where + single-inheritance criteria would not be correctly applied to a + subclass entity that only appeared in the ``select_from()`` list, + see :ticket:`11412` + .. 
changelog:: :version: 1.4.51 :released: January 2, 2024 diff --git a/setup.cfg b/setup.cfg index c11b40cbbf2..23471142860 100644 --- a/setup.cfg +++ b/setup.cfg @@ -108,7 +108,7 @@ enable-extensions = G # E203 is due to https://github.com/PyCQA/pycodestyle/issues/373 ignore = - A003, A004 + A003, A004, A005, A006 D, E203,E305,E711,E712,E721,E722,E741, N801,N802,N806, diff --git a/test/orm/inheritance/test_single.py b/test/orm/inheritance/test_single.py index 041e635ab10..afb0ce95dca 100644 --- a/test/orm/inheritance/test_single.py +++ b/test/orm/inheritance/test_single.py @@ -396,6 +396,58 @@ def test_select_from_aliased_w_subclass(self): "WHERE employees_1.type IN (__[POSTCOMPILE_type_1])", ) + @testing.combinations( + ( + lambda Engineer, Report: select(Report.report_id) + .select_from(Engineer) + .join(Engineer.reports), + ), + ( + lambda Engineer, Report: select(Report.report_id).select_from( + orm_join(Engineer, Report, Engineer.reports) + ), + ), + ( + lambda Engineer, Report: select(Report.report_id).join_from( + Engineer, Report, Engineer.reports + ), + ), + ( + lambda Engineer, Report: select(Report.report_id) + .select_from(Engineer) + .join(Report), + ), + argnames="stmt_fn", + ) + @testing.combinations(True, False, argnames="alias_engineer") + def test_select_col_only_from_w_join(self, stmt_fn, alias_engineer): + """test #11412 which seems to have been fixed by #10365""" + + Engineer = self.classes.Engineer + Report = self.classes.Report + + if alias_engineer: + Engineer = aliased(Engineer) + stmt = testing.resolve_lambda( + stmt_fn, Engineer=Engineer, Report=Report + ) + + if alias_engineer: + self.assert_compile( + stmt, + "SELECT reports.report_id FROM employees AS employees_1 " + "JOIN reports ON employees_1.employee_id = " + "reports.employee_id WHERE employees_1.type " + "IN (__[POSTCOMPILE_type_1])", + ) + else: + self.assert_compile( + stmt, + "SELECT reports.report_id FROM employees JOIN reports " + "ON employees.employee_id = 
reports.employee_id " + "WHERE employees.type IN (__[POSTCOMPILE_type_1])", + ) + @testing.combinations( ( lambda Engineer, Report: select(Report) From e35549efb00abdd8627e87961bbc4b37c3de515d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 11 Jun 2024 09:16:26 -0400 Subject: [PATCH 588/632] loosen up hash_limit test hash_limit_string works by doing a modulus of a hash value so that the range of possible numbers is 0-N. however, there's a chance we might not populate every 0-N value in unusual cases on CI, even after iterating 500 times apparently. Loosen the change by making sure we got at least N/2 unique hash messages but not greater than N. Change-Id: I5cd2845697ec0a718ddca1c95fbc4867b06eabee (cherry picked from commit ef04a401100ff37915c281c412ed3d784565e429) --- test/base/test_warnings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/base/test_warnings.py b/test/base/test_warnings.py index be34f958b28..7e5063bdf22 100644 --- a/test/base/test_warnings.py +++ b/test/base/test_warnings.py @@ -36,7 +36,7 @@ def test_warn_deprecated_limited_cap(self): messages.add(message) eq_(len(printouts), occurrences) - eq_(len(messages), cap) + assert cap / 2 < len(messages) <= cap class ClsWarningTest(fixtures.TestBase): From 24206ad815d18507eb8e6203c9eff97a68c00147 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Tue, 11 Jun 2024 23:59:02 +0200 Subject: [PATCH 589/632] fix macos tests Change-Id: I15cd4f541d88082fe16af121726f8ea5dd73fd97 --- .github/workflows/create-wheels.yaml | 4 ++-- .github/workflows/run-test.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/create-wheels.yaml b/.github/workflows/create-wheels.yaml index 959a7c66ab0..cb6d16a7335 100644 --- a/.github/workflows/create-wheels.yaml +++ b/.github/workflows/create-wheels.yaml @@ -21,7 +21,7 @@ jobs: matrix: os: - "windows-latest" - - "macos-latest" + - "macos-13" python-version: - "3.6" - "3.7" @@ -35,7 +35,7 @@ jobs: - x86 exclude: 
- - os: "macos-latest" + - os: "macos-13" architecture: x86 fail-fast: false diff --git a/.github/workflows/run-test.yaml b/.github/workflows/run-test.yaml index c015ba4e947..8ad1d1deec4 100644 --- a/.github/workflows/run-test.yaml +++ b/.github/workflows/run-test.yaml @@ -30,7 +30,7 @@ jobs: - "ubuntu-20.04" - "ubuntu-latest" - "windows-latest" - - "macos-latest" + - "macos-13" python-version: - "3.6" - "3.7" @@ -63,7 +63,7 @@ jobs: architecture: x86 - os: "ubuntu-20.04" architecture: x86 - - os: "macos-latest" + - os: "macos-13" architecture: x86 # ubuntu-latest does not have: py27, py36 - os: "ubuntu-latest" From f8441937633c8e076d1e612ce89fcc6ca1298483 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 10 Jun 2024 22:59:49 -0400 Subject: [PATCH 590/632] include HasCTE traversal elements in TextualSelect Fixed caching issue where using the :meth:`.TextualSelect.add_cte` method of the :class:`.TextualSelect` construct would not set a correct cache key which distinguished between different CTE expressions. Fixes: #11471 Change-Id: Ia9ce2c8cfd128f0f130aa9b26448dc23d994c324 (cherry picked from commit faecebc9df2a57173ee720973ba44ada370b682f) (cherry picked from commit 228d98e43a0bba1f33cd46f92f14851ecccbb728) --- doc/build/changelog/unreleased_14/11471.rst | 7 +++++++ lib/sqlalchemy/sql/selectable.py | 12 ++++++++---- test/sql/test_compare.py | 9 +++++++++ 3 files changed, 24 insertions(+), 4 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/11471.rst diff --git a/doc/build/changelog/unreleased_14/11471.rst b/doc/build/changelog/unreleased_14/11471.rst new file mode 100644 index 00000000000..f669eabc789 --- /dev/null +++ b/doc/build/changelog/unreleased_14/11471.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: bug, sql + :tickets: 11471 + + Fixed caching issue where using the :meth:`.TextualSelect.add_cte` method + of the :class:`.TextualSelect` construct would not set a correct cache key + which distinguished between different CTE expressions. 
diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index b6e96c7b0c3..81fd45da927 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -6867,10 +6867,14 @@ class was renamed _label_style = LABEL_STYLE_NONE - _traverse_internals = [ - ("element", InternalTraversal.dp_clauseelement), - ("column_args", InternalTraversal.dp_clauseelement_list), - ] + SupportsCloneAnnotations._clone_annotations_traverse_internals + _traverse_internals = ( + [ + ("element", InternalTraversal.dp_clauseelement), + ("column_args", InternalTraversal.dp_clauseelement_list), + ] + + SupportsCloneAnnotations._clone_annotations_traverse_internals + + HasCTE._has_ctes_traverse_internals + ) _is_textual = True diff --git a/test/sql/test_compare.py b/test/sql/test_compare.py index d64deb86777..0a7f0d4114d 100644 --- a/test/sql/test_compare.py +++ b/test/sql/test_compare.py @@ -205,6 +205,15 @@ class CoreFixtures(object): bindparam("bar", type_=String) ), ), + lambda: ( + # test #11471 + text("select * from table") + .columns(a=Integer()) + .add_cte(table_b.select().cte()), + text("select * from table") + .columns(a=Integer()) + .add_cte(table_b.select().where(table_b.c.a > 5).cte()), + ), lambda: ( literal(1).op("+")(literal(1)), literal(1).op("-")(literal(1)), From 410a4f00ba701c5655fe5de69ab77e866fcc8ee5 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 12 Jun 2024 12:42:29 -0400 Subject: [PATCH 591/632] open up async greenlet for third parties Modified the internal representation used for adapting asyncio calls to greenlets to allow for duck-typed compatibility with third party libraries that implement SQLAlchemy's "greenlet-to-asyncio" pattern directly. Running code within a greenlet that features the attribute ``__sqlalchemy_greenlet_provider__ = True`` will allow calls to :func:`sqlalchemy.util.await_only` directly. 
Change-Id: I79c67264e1a642b9a80d3b46dc64bdda80acf0aa (cherry picked from commit c1e2d9180a14c74495b712e08d8156b92f907ac0) (cherry picked from commit 1a6ff466b29ad3a114a27f2776538d8d998db2dd) --- .../unreleased_14/greenlet_compat.rst | 10 ++++ lib/sqlalchemy/util/_concurrency_py3k.py | 49 ++++++++++--------- 2 files changed, 35 insertions(+), 24 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/greenlet_compat.rst diff --git a/doc/build/changelog/unreleased_14/greenlet_compat.rst b/doc/build/changelog/unreleased_14/greenlet_compat.rst new file mode 100644 index 00000000000..d9eb51cd9c0 --- /dev/null +++ b/doc/build/changelog/unreleased_14/greenlet_compat.rst @@ -0,0 +1,10 @@ +.. change:: + :tags: usecase, engine + + Modified the internal representation used for adapting asyncio calls to + greenlets to allow for duck-typed compatibility with third party libraries + that implement SQLAlchemy's "greenlet-to-asyncio" pattern directly. + Running code within a greenlet that features the attribute + ``__sqlalchemy_greenlet_provider__ = True`` will allow calls to + :func:`sqlalchemy.util.await_only` directly. 
+ diff --git a/lib/sqlalchemy/util/_concurrency_py3k.py b/lib/sqlalchemy/util/_concurrency_py3k.py index cc5b1c2faea..1e4ffefa401 100644 --- a/lib/sqlalchemy/util/_concurrency_py3k.py +++ b/lib/sqlalchemy/util/_concurrency_py3k.py @@ -37,9 +37,11 @@ def is_exit_exception(e): class _AsyncIoGreenlet(greenlet.greenlet): + + __sqlalchemy_greenlet_provider__ = True + def __init__(self, fn, driver): greenlet.greenlet.__init__(self, fn, driver) - self.driver = driver if _has_gr_context: self.gr_context = driver.gr_context @@ -55,7 +57,7 @@ def await_only(awaitable: Coroutine) -> Any: """ # this is called in the context greenlet while running fn current = greenlet.getcurrent() - if not isinstance(current, _AsyncIoGreenlet): + if not getattr(current, "__sqlalchemy_greenlet_provider__", False): raise exc.MissingGreenlet( "greenlet_spawn has not been called; can't call await_only() " "here. Was IO attempted in an unexpected place?" @@ -65,7 +67,7 @@ def await_only(awaitable: Coroutine) -> Any: # a coroutine to run. Once the awaitable is done, the driver greenlet # switches back to this greenlet with the result of awaitable that is # then returned to the caller (or raised as error) - return current.driver.switch(awaitable) + return current.parent.switch(awaitable) def await_fallback(awaitable: Coroutine) -> Any: @@ -79,7 +81,7 @@ def await_fallback(awaitable: Coroutine) -> Any: """ # this is called in the context greenlet while running fn current = greenlet.getcurrent() - if not isinstance(current, _AsyncIoGreenlet): + if not getattr(current, "__sqlalchemy_greenlet_provider__", False): loop = get_event_loop() if loop.is_running(): raise exc.MissingGreenlet( @@ -89,7 +91,7 @@ def await_fallback(awaitable: Coroutine) -> Any: ) return loop.run_until_complete(awaitable) - return current.driver.switch(awaitable) + return current.parent.switch(awaitable) async def greenlet_spawn( @@ -111,24 +113,21 @@ async def greenlet_spawn( # coroutine to wait. 
If the context is dead the function has # returned, and its result can be returned. switch_occurred = False - try: - result = context.switch(*args, **kwargs) - while not context.dead: - switch_occurred = True - try: - # wait for a coroutine from await_only and then return its - # result back to it. - value = await result - except BaseException: - # this allows an exception to be raised within - # the moderated greenlet so that it can continue - # its expected flow. - result = context.throw(*sys.exc_info()) - else: - result = context.switch(value) - finally: - # clean up to avoid cycle resolution by gc - del context.driver + result = context.switch(*args, **kwargs) + while not context.dead: + switch_occurred = True + try: + # wait for a coroutine from await_only and then return its + # result back to it. + value = await result + except BaseException: + # this allows an exception to be raised within + # the moderated greenlet so that it can continue + # its expected flow. + result = context.throw(*sys.exc_info()) + else: + result = context.switch(value) + if _require_await and not switch_occurred: raise exc.AwaitRequired( "The current operation required an async execution but none was " @@ -175,7 +174,9 @@ def _util_async_run(fn, *args, **kwargs): return loop.run_until_complete(greenlet_spawn(fn, *args, **kwargs)) else: # allow for a wrapped test function to call another - assert isinstance(greenlet.getcurrent(), _AsyncIoGreenlet) + assert getattr( + greenlet.getcurrent(), "__sqlalchemy_greenlet_provider__", False + ) return fn(*args, **kwargs) From 9f30f7951dccc0c34554d4c14bbc8c12688aa685 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 26 May 2024 11:34:27 -0400 Subject: [PATCH 592/632] backport of applicable 3.13 fixes Fixes: #11417 Change-Id: Ib2ceccd9583d8776700f0da5b591906efcfe6e6f (cherry picked from commit 754804635bc922c20d0b0075e0ed2da0add38742) --- doc/build/changelog/unreleased_14/11417.rst | 11 + lib/sqlalchemy/ext/serializer.py | 274 
+++++++++++++------- setup.cfg | 44 +++- test/orm/test_mapper.py | 4 +- tox.ini | 71 +++-- 5 files changed, 283 insertions(+), 121 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/11417.rst diff --git a/doc/build/changelog/unreleased_14/11417.rst b/doc/build/changelog/unreleased_14/11417.rst new file mode 100644 index 00000000000..5182c03ea86 --- /dev/null +++ b/doc/build/changelog/unreleased_14/11417.rst @@ -0,0 +1,11 @@ +.. change:: + :tags: bug, general + :tickets: 11417 + + Set up full Python 3.13 support to the extent currently possible, repairing + issues within internal language helpers as well as the serializer extension + module. + + For version 1.4, this also modernizes the "extras" names in setup.cfg + to use dashes and not underscores for two-word names. Underscore names + are still present to accommodate potential compatibility issues. diff --git a/lib/sqlalchemy/ext/serializer.py b/lib/sqlalchemy/ext/serializer.py index 987ec1f544d..689acae7e2b 100644 --- a/lib/sqlalchemy/ext/serializer.py +++ b/lib/sqlalchemy/ext/serializer.py @@ -71,102 +71,202 @@ from ..util import b64encode from ..util import byte_buffer from ..util import pickle +from ..util import py2k from ..util import text_type - __all__ = ["Serializer", "Deserializer", "dumps", "loads"] -def Serializer(*args, **kw): - pickler = pickle.Pickler(*args, **kw) - - def persistent_id(obj): - # print "serializing:", repr(obj) - if isinstance(obj, Mapper) and not obj.non_primary: - id_ = "mapper:" + b64encode(pickle.dumps(obj.class_)) - elif isinstance(obj, MapperProperty) and not obj.parent.non_primary: - id_ = ( - "mapperprop:" - + b64encode(pickle.dumps(obj.parent.class_)) - + ":" - + obj.key - ) - elif isinstance(obj, Table): - if "parententity" in obj._annotations: - id_ = "mapper_selectable:" + b64encode( - pickle.dumps(obj._annotations["parententity"].class_) +if py2k: + + def Serializer(*args, **kw): + pickler = pickle.Pickler(*args, **kw) + + def persistent_id(obj): + # 
print "serializing:", repr(obj) + if isinstance(obj, Mapper) and not obj.non_primary: + id_ = "mapper:" + b64encode(pickle.dumps(obj.class_)) + elif ( + isinstance(obj, MapperProperty) and not obj.parent.non_primary + ): + id_ = ( + "mapperprop:" + + b64encode(pickle.dumps(obj.parent.class_)) + + ":" + + obj.key ) + elif isinstance(obj, Table): + if "parententity" in obj._annotations: + id_ = "mapper_selectable:" + b64encode( + pickle.dumps(obj._annotations["parententity"].class_) + ) + else: + id_ = "table:" + text_type(obj.key) + elif isinstance(obj, Column) and isinstance(obj.table, Table): + id_ = ( + "column:" + + text_type(obj.table.key) + + ":" + + text_type(obj.key) + ) + elif isinstance(obj, Session): + id_ = "session:" + elif isinstance(obj, Engine): + id_ = "engine:" else: - id_ = "table:" + text_type(obj.key) - elif isinstance(obj, Column) and isinstance(obj.table, Table): - id_ = ( - "column:" + text_type(obj.table.key) + ":" + text_type(obj.key) - ) - elif isinstance(obj, Session): - id_ = "session:" - elif isinstance(obj, Engine): - id_ = "engine:" - else: - return None - return id_ - - pickler.persistent_id = persistent_id - return pickler - - -our_ids = re.compile( - r"(mapperprop|mapper|mapper_selectable|table|column|" - r"session|attribute|engine):(.*)" -) - - -def Deserializer(file, metadata=None, scoped_session=None, engine=None): - unpickler = pickle.Unpickler(file) - - def get_engine(): - if engine: - return engine - elif scoped_session and scoped_session().bind: - return scoped_session().bind - elif metadata and metadata.bind: - return metadata.bind - else: - return None - - def persistent_load(id_): - m = our_ids.match(text_type(id_)) - if not m: - return None - else: - type_, args = m.group(1, 2) - if type_ == "attribute": - key, clsarg = args.split(":") - cls = pickle.loads(b64decode(clsarg)) - return getattr(cls, key) - elif type_ == "mapper": - cls = pickle.loads(b64decode(args)) - return class_mapper(cls) - elif type_ == 
"mapper_selectable": - cls = pickle.loads(b64decode(args)) - return class_mapper(cls).__clause_element__() - elif type_ == "mapperprop": - mapper, keyname = args.split(":") - cls = pickle.loads(b64decode(mapper)) - return class_mapper(cls).attrs[keyname] - elif type_ == "table": - return metadata.tables[args] - elif type_ == "column": - table, colname = args.split(":") - return metadata.tables[table].c[colname] - elif type_ == "session": - return scoped_session() - elif type_ == "engine": - return get_engine() + return None + return id_ + + pickler.persistent_id = persistent_id + return pickler + + our_ids = re.compile( + r"(mapperprop|mapper|mapper_selectable|table|column|" + r"session|attribute|engine):(.*)" + ) + + def Deserializer(file, metadata=None, scoped_session=None, engine=None): + unpickler = pickle.Unpickler(file) + + def get_engine(): + if engine: + return engine + elif scoped_session and scoped_session().bind: + return scoped_session().bind + elif metadata and metadata.bind: + return metadata.bind else: - raise Exception("Unknown token: %s" % type_) + return None - unpickler.persistent_load = persistent_load - return unpickler + def persistent_load(id_): + m = our_ids.match(text_type(id_)) + if not m: + return None + else: + type_, args = m.group(1, 2) + if type_ == "attribute": + key, clsarg = args.split(":") + cls = pickle.loads(b64decode(clsarg)) + return getattr(cls, key) + elif type_ == "mapper": + cls = pickle.loads(b64decode(args)) + return class_mapper(cls) + elif type_ == "mapper_selectable": + cls = pickle.loads(b64decode(args)) + return class_mapper(cls).__clause_element__() + elif type_ == "mapperprop": + mapper, keyname = args.split(":") + cls = pickle.loads(b64decode(mapper)) + return class_mapper(cls).attrs[keyname] + elif type_ == "table": + return metadata.tables[args] + elif type_ == "column": + table, colname = args.split(":") + return metadata.tables[table].c[colname] + elif type_ == "session": + return scoped_session() + elif 
type_ == "engine": + return get_engine() + else: + raise Exception("Unknown token: %s" % type_) + + unpickler.persistent_load = persistent_load + return unpickler + + +else: + + class Serializer(pickle.Pickler): + def persistent_id(self, obj): + # print "serializing:", repr(obj) + if isinstance(obj, Mapper) and not obj.non_primary: + id_ = "mapper:" + b64encode(pickle.dumps(obj.class_)) + elif ( + isinstance(obj, MapperProperty) and not obj.parent.non_primary + ): + id_ = ( + "mapperprop:" + + b64encode(pickle.dumps(obj.parent.class_)) + + ":" + + obj.key + ) + elif isinstance(obj, Table): + if "parententity" in obj._annotations: + id_ = "mapper_selectable:" + b64encode( + pickle.dumps(obj._annotations["parententity"].class_) + ) + else: + id_ = "table:" + text_type(obj.key) + elif isinstance(obj, Column) and isinstance(obj.table, Table): + id_ = ( + "column:" + + text_type(obj.table.key) + + ":" + + text_type(obj.key) + ) + elif isinstance(obj, Session): + id_ = "session:" + elif isinstance(obj, Engine): + id_ = "engine:" + else: + return None + return id_ + + our_ids = re.compile( + r"(mapperprop|mapper|mapper_selectable|table|column|" + r"session|attribute|engine):(.*)" + ) + + class Deserializer(pickle.Unpickler): + def __init__( + self, file, metadata=None, scoped_session=None, engine=None + ): + super().__init__(file) + self.metadata = metadata + self.scoped_session = scoped_session + self.engine = engine + + def get_engine(self): + if self.engine: + return self.engine + elif self.scoped_session and self.scoped_session().bind: + return self.scoped_session().bind + elif self.metadata and self.metadata.bind: + return self.metadata.bind + else: + return None + + def persistent_load(self, id_): + m = our_ids.match(text_type(id_)) + if not m: + return None + else: + type_, args = m.group(1, 2) + if type_ == "attribute": + key, clsarg = args.split(":") + cls = pickle.loads(b64decode(clsarg)) + return getattr(cls, key) + elif type_ == "mapper": + cls = 
pickle.loads(b64decode(args)) + return class_mapper(cls) + elif type_ == "mapper_selectable": + cls = pickle.loads(b64decode(args)) + return class_mapper(cls).__clause_element__() + elif type_ == "mapperprop": + mapper, keyname = args.split(":") + cls = pickle.loads(b64decode(mapper)) + return class_mapper(cls).attrs[keyname] + elif type_ == "table": + return self.metadata.tables[args] + elif type_ == "column": + table, colname = args.split(":") + return self.metadata.tables[table].c[colname] + elif type_ == "session": + return self.scoped_session() + elif type_ == "engine": + return self.get_engine() + else: + raise Exception("Unknown token: %s" % type_) def dumps(obj, protocol=pickle.HIGHEST_PROTOCOL): diff --git a/setup.cfg b/setup.cfg index 23471142860..e4cee11058d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -52,25 +52,28 @@ mypy = mypy >= 0.910;python_version>="3" sqlalchemy2-stubs mssql = pyodbc -mssql_pymssql = pymssql -mssql_pyodbc = pyodbc + +mssql-pymssql = pymssql +mssql-pyodbc = pyodbc + mysql = mysqlclient>=1.4.0,<2;python_version<"3" mysqlclient>=1.4.0;python_version>="3" -mysql_connector = +mysql-connector = mysql-connector-python -mariadb_connector = +mariadb-connector = mariadb>=1.0.1,!=1.1.2;python_version>="3" oracle = cx_oracle>=7,<8;python_version<"3" cx_oracle>=7;python_version>="3" postgresql = psycopg2>=2.7 -postgresql_pg8000 = pg8000>=1.16.6,!=1.29.0 -postgresql_asyncpg = +postgresql-pg8000 = + pg8000>=1.16.6,!=1.29.0;python_version>="3" + +postgresql-asyncpg = %(asyncio)s asyncpg;python_version>="3" -postgresql_psycopg2binary = psycopg2-binary -postgresql_psycopg2cffi = psycopg2cffi + pymysql = pymysql;python_version>="3" pymysql<1;python_version<"3" @@ -87,6 +90,31 @@ aiosqlite = sqlcipher = sqlcipher3_binary;python_version>="3" +# legacy underscore names +# there appears as if there might be some dual-passes through this file +# when tox installs extras, sometimes looking for dashed names and sometimes +# looking first for underscore 
names. so the dash/underscore names here are +# kept entirely independent of each other else things dont seem to want +# to install +mssql_pymssql = pymssql +mssql_pyodbc = pyodbc + +mysql_connector = + mysql-connector-python + +mariadb_connector = + mariadb>=1.0.1,!=1.1.2;python_version>="3" + +postgresql_pg8000 = + pg8000>=1.16.6,!=1.29.0;python_version>="3" + +postgresql_asyncpg = + %(asyncio)s + asyncpg;python_version>="3" + +postgresql_psycopg2binary = psycopg2-binary +postgresql_psycopg2cffi = psycopg2cffi + [egg_info] tag_build = dev diff --git a/test/orm/test_mapper.py b/test/orm/test_mapper.py index c8a87cf5b7d..1c46f316931 100644 --- a/test/orm/test_mapper.py +++ b/test/orm/test_mapper.py @@ -1794,12 +1794,12 @@ def _x(self): ) # object gracefully handles this condition - assert not hasattr(User.x, "__name__") + assert not hasattr(User.x, "foobar") assert not hasattr(User.x, "comparator") m.add_property("some_attr", column_property(users.c.name)) - assert not hasattr(User.x, "__name__") + assert not hasattr(User.x, "foobar") assert hasattr(User.x, "comparator") def test_synonym_of_non_property_raises(self): diff --git a/tox.ini b/tox.ini index 9a198e77d76..5372dbb34a7 100644 --- a/tox.ini +++ b/tox.ini @@ -2,6 +2,15 @@ [tox] envlist = py +[greenletextras] +extras= + asyncio + sqlite: aiosqlite + sqlite_file: aiosqlite + postgresql: postgresql-asyncpg + mysql: asyncmy + mysql: aiomysql + [testenv] # note that we have a .coveragerc file that points coverage specifically # at ./lib/sqlalchemy, and *not* at the build that tox might create under .tox. 
@@ -18,33 +27,30 @@ constrain_package_deps=false usedevelop= cov: True +extras= + py{3,38,39,310,311,312}: {[greenletextras]extras} + + postgresql: postgresql + postgresql: postgresql-pg8000 + + mysql: mysql + mysql: pymysql + mysql: mariadb-connector + + oracle: oracle + mssql: mssql + deps= pytest>=4.6.11,<5.0; python_version < '3' pytest>=6.2,<8; python_version >= '3' pytest-xdist mock; python_version < '3.3' - sqlite: .[aiosqlite] - sqlite_file: .[aiosqlite] - sqlite_file: .[sqlcipher]; python_version >= '3' and python_version < '3.10' - postgresql: .[postgresql] - py3{,7,8,9,10,11}-postgresql: .[postgresql_asyncpg]; python_version >= '3' - postgresql: .[postgresql_pg8000]; python_version >= '3' - mysql: .[mysql] - mysql: .[pymysql] - mysql: .[asyncmy]; python_version >= '3' - mysql: .[aiomysql]; python_version >= '3' - # mysql: .[mariadb_connector]; python_version >= '3' + py313: git+https://github.com/vstinner/greenlet@py313\#egg=greenlet - oracle: .[oracle] - - mssql: .[mssql] - - py312: greenlet>=3.0.0a1 - - dbapimain-sqlite: git+https://github.com/omnilib/aiosqlite.git#egg=aiosqlite - dbapimain-sqlite: git+https://github.com/coleifer/sqlcipher3.git#egg=sqlcipher3 + dbapimain-sqlite: git+https://github.com/omnilib/aiosqlite.git\#egg=aiosqlite + dbapimain-sqlite: git+https://github.com/coleifer/sqlcipher3.git\#egg=sqlcipher3 dbapimain-postgresql: git+https://github.com/psycopg/psycopg2.git#egg=psycopg2 dbapimain-postgresql: git+https://github.com/MagicStack/asyncpg.git#egg=asyncpg @@ -56,7 +62,8 @@ deps= dbapimain-oracle: git+https://github.com/oracle/python-cx_Oracle.git#egg=cx_Oracle - dbapimain-mssql: git+https://github.com/mkleehammer/pyodbc.git#egg=pyodbc + py313-mssql: git+https://github.com/mkleehammer/pyodbc.git\#egg=pyodbc + dbapimain-mssql: git+https://github.com/mkleehammer/pyodbc.git\#egg=pyodbc cov: pytest-cov @@ -91,8 +98,6 @@ setenv= WORKERS={env:TOX_WORKERS:-n4 --max-worker-restart=5} - - nocext: DISABLE_SQLALCHEMY_CEXT=1 cext: 
REQUIRE_SQLALCHEMY_CEXT=1 cov: COVERAGE={[testenv]cov_args} @@ -104,8 +109,14 @@ setenv= sqlite: SQLITE={env:TOX_SQLITE:--db sqlite} sqlite_file: SQLITE={env:TOX_SQLITE_FILE:--db sqlite_file} - py3{,5,6,7,8,9,10,11}-sqlite: EXTRA_SQLITE_DRIVERS={env:EXTRA_SQLITE_DRIVERS:--dbdriver sqlite --dbdriver aiosqlite} - py3{,5,6,7,8,9}-sqlite_file: EXTRA_SQLITE_DRIVERS={env:EXTRA_SQLITE_DRIVERS:--dbdriver sqlite --dbdriver aiosqlite --dbdriver pysqlcipher} + + sqlite: EXTRA_SQLITE_DRIVERS={env:EXTRA_SQLITE_DRIVERS:--dbdriver sqlite --dbdriver pysqlite_numeric --dbdriver aiosqlite} + py{313,314}-sqlite: EXTRA_SQLITE_DRIVERS={env:EXTRA_SQLITE_DRIVERS:--dbdriver sqlite --dbdriver pysqlite_numeric} + + sqlite-nogreenlet: EXTRA_SQLITE_DRIVERS={env:EXTRA_SQLITE_DRIVERS:--dbdriver sqlite --dbdriver pysqlite_numeric} + + py{37,38,39}-sqlite_file: EXTRA_SQLITE_DRIVERS={env:EXTRA_SQLITE_DRIVERS:--dbdriver sqlite --dbdriver aiosqlite --dbdriver pysqlcipher} + # omit pysqlcipher for Python 3.10 py3{,10,11}-sqlite_file: EXTRA_SQLITE_DRIVERS={env:EXTRA_SQLITE_DRIVERS:--dbdriver sqlite --dbdriver aiosqlite} @@ -134,6 +145,8 @@ setenv= oracle,mssql,sqlite_file: IDENTS=--write-idents db_idents.txt oracle,mssql,sqlite_file: MEMUSAGE=--nomemory + + # tox as of 2.0 blocks all environment variables from the # outside, unless they are here (or in TOX_TESTENV_PASSENV, # wildcards OK). Need at least these @@ -177,6 +190,10 @@ commands = # thanks to https://julien.danjou.info/the-best-flake8-extensions/ [testenv:pep8] basepython = python3 + +extras= + {[greenletextras]extras} + deps= flake8 #flake8-import-order @@ -196,6 +213,9 @@ commands = # command run in the github action when cext are active. [testenv:github-cext] +extras= + {[greenletextras]extras} + deps = {[testenv]deps} .[aiosqlite] commands= @@ -204,6 +224,9 @@ commands= # command run in the github action when cext are not active. 
[testenv:github-nocext] +extras= + {[greenletextras]extras} + deps = {[testenv]deps} .[aiosqlite] commands= From d9fb35baac5c03cdf09fc43570ca99ad0a69b913 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 19 Jun 2024 11:44:54 -0400 Subject: [PATCH 593/632] pin setuptools below 69.3 and prepare for "build" for releases for 1.4, we introduce the build-system entry so that we can use "build" for releases Change-Id: Ib70446cc3c7d7d8acb264ffa2237a0c7aac5a0f5 (cherry picked from commit f6283dd6d902fd0d8b5a7ecc6c37c4ebde4d93f3) (cherry picked from commit 4b7ffd28e5bbc942c18b0c9b3bc9e50265ef15a1) --- pyproject.toml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index da6c4cba069..fb4c8e64ca5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,11 @@ +[build-system] +build-backend = "setuptools.build_meta" +requires = [ + # avoid moving to https://github.com/pypa/setuptools/issues/3593 + # until we're ready + "setuptools>=61.0,<69.3", +] + [tool.black] line-length = 79 target-version = ['py27', 'py36'] From 18272c5838090a74dab8e121a507ba5b6284bfb1 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 20 Jun 2024 12:26:10 -0400 Subject: [PATCH 594/632] lower setuptools lower bound to suit python 2.7 looks like 44.x is the latest for 2.7. not sure how this passed on CI when we had it at 60? Change-Id: I9d351ef855de9d84482c458f22118b4f7b40f763 --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index fb4c8e64ca5..891120ab7e0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,7 +3,7 @@ build-backend = "setuptools.build_meta" requires = [ # avoid moving to https://github.com/pypa/setuptools/issues/3593 # until we're ready - "setuptools>=61.0,<69.3", + "setuptools>=44,<69.3", ] [tool.black] From fc66fda85e30718a384e9d214b927842cbc7d442 Mon Sep 17 00:00:00 2001 From: "Benjamin A. 
Beasley" Date: Thu, 20 Jun 2024 11:02:39 -0400 Subject: [PATCH 595/632] SQLAlchemy 1.4: Fix building the C extension on Python 3.13 Adjustments to the C extensions, which are specific to the SQLAlchemy 1.x series, to work under Python 3.13. Pull request courtesy Ben Beasley. Fixes: #11499 Closes: #11500 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/11500 Pull-request-sha: 8a5888b147022b4586d30dcd7159e4fa6a31ec0e Change-Id: I1943eb387f9b075bf07e179f7a24762236e234bf --- doc/build/changelog/unreleased_14/11499.rst | 6 +++++ lib/sqlalchemy/cextension/resultproxy.c | 25 ++++++++++++++++++++- 2 files changed, 30 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/11499.rst diff --git a/doc/build/changelog/unreleased_14/11499.rst b/doc/build/changelog/unreleased_14/11499.rst new file mode 100644 index 00000000000..e03062c1911 --- /dev/null +++ b/doc/build/changelog/unreleased_14/11499.rst @@ -0,0 +1,6 @@ +.. change:: + :tags: bug, engine + :tickets: 11499 + + Adjustments to the C extensions, which are specific to the SQLAlchemy 1.x + series, to work under Python 3.13. Pull request courtesy Ben Beasley. 
diff --git a/lib/sqlalchemy/cextension/resultproxy.c b/lib/sqlalchemy/cextension/resultproxy.c index 8e8b6f9e4fe..a88af0ede96 100644 --- a/lib/sqlalchemy/cextension/resultproxy.c +++ b/lib/sqlalchemy/cextension/resultproxy.c @@ -821,6 +821,29 @@ typedef struct { static PyTypeObject tuplegetter_type; +static int +PyArg_NoKeywords(const char *funcname, PyObject *kwargs) +{ +#if PY_MAJOR_VERSION >= 3 && PY_MINOR_VERSION >= 13 + /* Based on the one in CPython, removed from the public headers in 3.13 + * (https://github.com/python/cpython/issues/110964) + */ + if (kwargs == NULL) + return 1; + if (!PyDict_CheckExact(kwargs)) { + PyErr_BadInternalCall(); + return 0; + } + if (PyDict_GET_SIZE(kwargs) == 0) + return 1; + + PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments", funcname); + return 0; +#else + return _PyArg_NoKeywords(funcname, kwargs); +#endif +} + static PyObject * tuplegetter_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { @@ -828,7 +851,7 @@ tuplegetter_new(PyTypeObject *type, PyObject *args, PyObject *kwds) PyObject *item; Py_ssize_t nitems; - if (!_PyArg_NoKeywords("tuplegetter", kwds)) + if (!PyArg_NoKeywords("tuplegetter", kwds)) return NULL; nitems = PyTuple_GET_SIZE(args); From 5da9d135b98efa572199e920b808d90abbe5dece Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 19 Jun 2024 11:03:25 -0400 Subject: [PATCH 596/632] use literal execute for SQL Server frame parameters Fixed issue where SQL Server drivers don't support bound parameters when rendering the "frame specification" for a window function, e.g. "ROWS BETWEEN", etc. 
Fixes: #11514 Change-Id: I0664f4076a2a8266434a4670949b8b44cd261f44 (cherry picked from commit c088b6426f1d73efe7de3e42b3e86f8027076bc3) (cherry picked from commit 9524e4bffc9c8545fdb8698ef029c420374ac00f) --- doc/build/changelog/unreleased_14/11514.rst | 8 ++++ lib/sqlalchemy/dialects/mssql/base.py | 4 ++ lib/sqlalchemy/testing/suite/test_select.py | 51 +++++++++++++++++++++ 3 files changed, 63 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/11514.rst diff --git a/doc/build/changelog/unreleased_14/11514.rst b/doc/build/changelog/unreleased_14/11514.rst new file mode 100644 index 00000000000..81f0ddeddc0 --- /dev/null +++ b/doc/build/changelog/unreleased_14/11514.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, mssql + :tickets: 11514 + + Fixed issue where SQL Server drivers don't support bound parameters when + rendering the "frame specification" for a window function, e.g. "ROWS + BETWEEN", etc. + diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index 326d9f54fcc..efaec75c540 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -1847,6 +1847,10 @@ def __init__(self, *args, **kwargs): self.tablealiases = {} super(MSSQLCompiler, self).__init__(*args, **kwargs) + def _format_frame_clause(self, range_, **kw): + kw["literal_execute"] = True + return super()._format_frame_clause(range_, **kw) + def _with_legacy_schema_aliasing(fn): def decorate(self, *arg, **kw): if self.dialect.legacy_schema_aliasing: diff --git a/lib/sqlalchemy/testing/suite/test_select.py b/lib/sqlalchemy/testing/suite/test_select.py index 42369a4e0f0..eca2203d58f 100644 --- a/lib/sqlalchemy/testing/suite/test_select.py +++ b/lib/sqlalchemy/testing/suite/test_select.py @@ -1787,3 +1787,54 @@ def test_is_or_is_not_distinct_from( len(result), expected_row_count_for_is_not, ) + + +class WindowFunctionTest(fixtures.TablesTest): + __requires__ = ("window_functions",) + + __backend__ = True + + 
@classmethod + def define_tables(cls, metadata): + Table( + "some_table", + metadata, + Column("id", Integer, primary_key=True), + Column("col1", Integer), + Column("col2", Integer), + ) + + @classmethod + def insert_data(cls, connection): + connection.execute( + cls.tables.some_table.insert(), + [{"id": i, "col1": i, "col2": i * 5} for i in range(1, 50)], + ) + + def test_window(self, connection): + some_table = self.tables.some_table + rows = connection.execute( + select( + func.max(some_table.c.col2).over( + order_by=[some_table.c.col1.desc()] + ) + ).where(some_table.c.col1 < 20) + ).all() + + eq_(rows, [(95,) for i in range(19)]) + + def test_window_rows_between(self, connection): + some_table = self.tables.some_table + + # note the rows are part of the cache key right now, not handled + # as binds. this is issue #11515 + rows = connection.execute( + select( + func.max(some_table.c.col2).over( + order_by=[some_table.c.col1], + rows=(-5, 0), + ) + ) + ).all() + + eq_(rows, [(i,) for i in range(5, 250, 5)]) From de458b43550b836973103e9cd92fde59de602635 Mon Sep 17 00:00:00 2001 From: Andreas Motl Date: Sun, 23 Jun 2024 12:14:15 +0200 Subject: [PATCH 597/632] Documentation: Update package name for CrateDB dialect (#11503) The CrateDB SQLAlchemy dialect needs more love, so it was separated from the DBAPI HTTP driver. 
The new canonical package for the SQLAlchemy CrateDB dialect on PyPI is: https://pypi.org/project/sqlalchemy-cratedb/ (cherry picked from commit d4b28a4409ca233039896225f4e882a8b07e5b56) (cherry picked from commit b685bc7d211515711a3885fcdac4654de16db407) --- doc/build/dialects/index.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/build/dialects/index.rst b/doc/build/dialects/index.rst index 92675827b9e..47d6f50de84 100644 --- a/doc/build/dialects/index.rst +++ b/doc/build/dialects/index.rst @@ -95,7 +95,7 @@ Currently maintained external dialect projects for SQLAlchemy include: +------------------------------------------------+---------------------------------------+ | CockroachDB | sqlalchemy-cockroachdb_ | +------------------------------------------------+---------------------------------------+ -| CrateDB | crate-python_ | +| CrateDB | sqlalchemy-cratedb_ | +------------------------------------------------+---------------------------------------+ | EXASolution | sqlalchemy_exasol_ | +------------------------------------------------+---------------------------------------+ @@ -152,7 +152,7 @@ Currently maintained external dialect projects for SQLAlchemy include: .. _sqlalchemy-monetdb: https://github.com/gijzelaerr/sqlalchemy-monetdb .. _snowflake-sqlalchemy: https://github.com/snowflakedb/snowflake-sqlalchemy .. _sqlalchemy-tds: https://github.com/m32/sqlalchemy-tds -.. _crate-python: https://github.com/crate/crate-python +.. _sqlalchemy-cratedb: https://github.com/crate/sqlalchemy-cratedb .. _sqlalchemy-access: https://pypi.org/project/sqlalchemy-access/ .. _elasticsearch-dbapi: https://github.com/preset-io/elasticsearch-dbapi/ .. 
_pydruid: https://github.com/druid-io/pydruid From 8128b5609630ba318bd195406240b468ada8b5da Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 23 Jun 2024 10:18:47 -0400 Subject: [PATCH 598/632] fix default label style doc Change-Id: I793f7b62c6c0b551ab1957cabcff685885b6e51c (cherry picked from commit c43238252f96a1f9370d1bc7ff440897b751b2b8) --- lib/sqlalchemy/sql/selectable.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index 81fd45da927..95d2fdf245a 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -3582,7 +3582,7 @@ def set_label_style(self, style): :data:`_sql.LABEL_STYLE_DISAMBIGUATE_ONLY`, :data:`_sql.LABEL_STYLE_TABLENAME_PLUS_COL`, and :data:`_sql.LABEL_STYLE_NONE`. The default style is - :data:`_sql.LABEL_STYLE_TABLENAME_PLUS_COL`. + :data:`_sql.LABEL_STYLE_DISAMBIGUATE_ONLY`. In modern SQLAlchemy, there is not generally a need to change the labeling style, as per-expression labels are more effectively used by From 5baf033b8ce0e244f297ca597c0deda1fe34ffd7 Mon Sep 17 00:00:00 2001 From: lonkeknol Date: Thu, 27 Jun 2024 09:03:30 -0400 Subject: [PATCH 599/632] Docs: simplify language use for "Working with Transactions and the DBAPI" This is my first pull request to sqlalchemy. It changes the writing style of two paragraphs in the unified tutorial [here](https://docs.sqlalchemy.org/en/20/tutorial/dbapi_transactions.html#working-with-transactions-and-the-dbapi). My goals were to. 1. Make them easier to read 2. Not change the meaning of the text. 3. Get feedback on whether this type of contribution is considered useful for sqlalchemy. If this is a useful type of contribution, it might be good to discuss some general guidelines for me to adhere to as I continue. 
For instance: - Prefer using present simple tense - Remove superfluous words where possible - Keep the pull requests to one or two h2 sections at a time, to make the review easier This pull request is: - [x] A documentation / typographical / small typing error fix - Good to go, no issue or tests are needed - [ ] A short code fix - please include the issue number, and create an issue if none exists, which must include a complete example of the issue. one line code fixes without an issue and demonstration will not be accepted. - Please include: `Fixes: #` in the commit message - please include tests. one line code fixes without tests will not be accepted. - [ ] A new feature implementation - please include the issue number, and create an issue if none exists, which must include a complete example of how the feature would look. - Please include: `Fixes: #` in the commit message - please include tests. I'm curious to hear what you all think. **Have a nice day!** Closes: #11541 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/11541 Pull-request-sha: 3179690e6a5b47de99a4486a7a15cffbfacd380b Change-Id: I9b47f6ce4fd00c44c4b0e19957acf250f5e46d2f (cherry picked from commit fc2cb4496d35c0b8bb7d59aa74b553f07210eded) --- doc/build/tutorial/dbapi_transactions.rst | 34 +++++++++++------------ 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/doc/build/tutorial/dbapi_transactions.rst b/doc/build/tutorial/dbapi_transactions.rst index 4eeee79e592..cd1ec0c2cfb 100644 --- a/doc/build/tutorial/dbapi_transactions.rst +++ b/doc/build/tutorial/dbapi_transactions.rst @@ -11,32 +11,32 @@ Working with Transactions and the DBAPI -With the :class:`_future.Engine` object ready to go, we may now proceed -to dive into the basic operation of an :class:`_future.Engine` and -its primary interactive endpoints, the :class:`_future.Connection` and -:class:`_engine.Result`. 
We will additionally introduce the ORM's -:term:`facade` for these objects, known as the :class:`_orm.Session`. +With the :class:`_future.Engine` object ready to go, we can +dive into the basic operation of an :class:`_future.Engine` and +its primary endpoints, the :class:`_future.Connection` and +:class:`_engine.Result`. We'll also introduce the ORM's :term:`facade` +for these objects, known as the :class:`_orm.Session`. .. container:: orm-header **Note to ORM readers** - When using the ORM, the :class:`_future.Engine` is managed by another - object called the :class:`_orm.Session`. The :class:`_orm.Session` in - modern SQLAlchemy emphasizes a transactional and SQL execution pattern that - is largely identical to that of the :class:`_future.Connection` discussed - below, so while this subsection is Core-centric, all of the concepts here - are essentially relevant to ORM use as well and is recommended for all ORM + When using the ORM, the :class:`_future.Engine` is managed by the + :class:`_orm.Session`. The :class:`_orm.Session` in modern SQLAlchemy + emphasizes a transactional and SQL execution pattern that is largely + identical to that of the :class:`_future.Connection` discussed below, + so while this subsection is Core-centric, all of the concepts here + are relevant to ORM use as well and is recommended for all ORM learners. The execution pattern used by the :class:`_future.Connection` - will be contrasted with that of the :class:`_orm.Session` at the end + will be compared to the :class:`_orm.Session` at the end of this section. As we have yet to introduce the SQLAlchemy Expression Language that is the -primary feature of SQLAlchemy, we will make use of one simple construct within -this package called the :func:`_sql.text` construct, which allows us to write -SQL statements as **textual SQL**. 
Rest assured that textual SQL in -day-to-day SQLAlchemy use is by far the exception rather than the rule for most -tasks, even though it always remains fully available. +primary feature of SQLAlchemy, we'll use a simple construct within +this package called the :func:`_sql.text` construct, to write +SQL statements as **textual SQL**. Rest assured that textual SQL is the +exception rather than the rule in day-to-day SQLAlchemy use, but it's +always available. .. rst-class:: core-header From 9f9d99928e6efb921c4dcd5febf810632c5521e8 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 27 Jun 2024 18:17:47 -0400 Subject: [PATCH 600/632] cache key share; support correct traverse of 'of' Fixed caching issue where the :paramref:`_sql.Select.with_for_update.key_share` element of :meth:`_sql.Select.with_for_update` was not considered as part of the cache key, leading to incorrect caching if different variations of this parameter were used with an otherwise identical statement. Also repairs a traversal issue where the ``of`` element of ``ForUpdateArg`` when set to ``None`` cannot be compared against a non-None element because the traversal defines it as a clauselist. Traversal in this case is adjusted to accommodate for this case so that we dont need to create a risky-to-backport change to ``ForUpdateArg`` itself. 
Fixes: #11544 Change-Id: Ie8a50716df06977af58b0c22a8c10e1b64d972b9 (cherry picked from commit 6d2f43e14f2fe25cdc811355b7bd6d11f8eee381) (cherry picked from commit 522baa306fc788cf02acf29bf08e86a431a7050e) --- lib/sqlalchemy/sql/selectable.py | 1 + lib/sqlalchemy/sql/traversals.py | 2 ++ test/sql/test_compare.py | 15 +++++++++++++++ 3 files changed, 18 insertions(+) diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index 95d2fdf245a..7536665a6ea 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -2831,6 +2831,7 @@ class ForUpdateArg(ClauseElement): ("nowait", InternalTraversal.dp_boolean), ("read", InternalTraversal.dp_boolean), ("skip_locked", InternalTraversal.dp_boolean), + ("key_share", InternalTraversal.dp_boolean), ] @classmethod diff --git a/lib/sqlalchemy/sql/traversals.py b/lib/sqlalchemy/sql/traversals.py index 41b960c9c33..eb4913d7c33 100644 --- a/lib/sqlalchemy/sql/traversals.py +++ b/lib/sqlalchemy/sql/traversals.py @@ -1159,6 +1159,8 @@ def compare(self, obj1, obj2, **kw): return False else: continue + elif right_child is None: + return False comparison = dispatch( left_attrname, left, left_child, right, right_child, **kw diff --git a/test/sql/test_compare.py b/test/sql/test_compare.py index 0a7f0d4114d..3e13174f790 100644 --- a/test/sql/test_compare.py +++ b/test/sql/test_compare.py @@ -472,6 +472,21 @@ class CoreFixtures(object): select(table_a.c.a) .where(table_a.c.b == 5) .with_for_update(nowait=True), + select(table_a.c.a) + .where(table_a.c.b == 5) + .with_for_update(nowait=True, skip_locked=True), + select(table_a.c.a) + .where(table_a.c.b == 5) + .with_for_update(nowait=True, read=True), + select(table_a.c.a) + .where(table_a.c.b == 5) + .with_for_update(of=table_a.c.a), + select(table_a.c.a) + .where(table_a.c.b == 5) + .with_for_update(of=table_a.c.b), + select(table_a.c.a) + .where(table_a.c.b == 5) + .with_for_update(nowait=True, key_share=True), 
select(table_a.c.a).where(table_a.c.b == 5).correlate(table_b), select(table_a.c.a) .where(table_a.c.b == 5) From 8f279dd96e907242e9e86e7860c7136060c5db81 Mon Sep 17 00:00:00 2001 From: lonkeknol Date: Fri, 28 Jun 2024 12:27:33 -0400 Subject: [PATCH 601/632] Docs: simplify language in getting a connection & committing changes Simplifies language use in [Getting a Connection](https://docs.sqlalchemy.org/en/20/tutorial/dbapi_transactions.html#getting-a-connection) and [Committing Changes](https://docs.sqlalchemy.org/en/20/tutorial/dbapi_transactions.html#committing-changes) This pull request is: - [x] A documentation / typographical / small typing error fix - Good to go, no issue or tests are needed - [ ] A short code fix - please include the issue number, and create an issue if none exists, which must include a complete example of the issue. one line code fixes without an issue and demonstration will not be accepted. - Please include: `Fixes: #` in the commit message - please include tests. one line code fixes without tests will not be accepted. - [ ] A new feature implementation - please include the issue number, and create an issue if none exists, which must include a complete example of how the feature would look. - Please include: `Fixes: #` in the commit message - please include tests. **Have a nice day!** Closes: #11542 Pull-request: https://github.com/sqlalchemy/sqlalchemy/pull/11542 Pull-request-sha: d706e69fb6058d3483fce98cfacbbf36ca12d78e Change-Id: I7788f2a16a5127b3c9623f7b00f06f649b04e0fb (cherry picked from commit 82d14a7515187ad744037ca9017ced1782314854) --- doc/build/tutorial/dbapi_transactions.rst | 90 +++++++++++------------ 1 file changed, 43 insertions(+), 47 deletions(-) diff --git a/doc/build/tutorial/dbapi_transactions.rst b/doc/build/tutorial/dbapi_transactions.rst index cd1ec0c2cfb..c1e815a4c8a 100644 --- a/doc/build/tutorial/dbapi_transactions.rst +++ b/doc/build/tutorial/dbapi_transactions.rst @@ -45,17 +45,15 @@ always available. 
Getting a Connection --------------------- -The sole purpose of the :class:`_future.Engine` object from a user-facing -perspective is to provide a unit of -connectivity to the database called the :class:`_future.Connection`. When -working with the Core directly, the :class:`_future.Connection` object -is how all interaction with the database is done. As the :class:`_future.Connection` -represents an open resource against the database, we want to always limit -the scope of our use of this object to a specific context, and the best -way to do that is by using Python context manager form, also known as -`the with statement `_. -Below we illustrate "Hello World", using a textual SQL statement. Textual -SQL is emitted using a construct called :func:`_sql.text` that will be discussed +The purpose of the :class:`_future.Engine` is to connect to the database by +providing a :class:`_future.Connection` object. When working with the Core +directly, the :class:`_future.Connection` object is how all interaction with the +database is done. Because the :class:`_future.Connection` creates an open +resource against the database, we want to limit our use of this object to a +specific context. The best way to do that is with a Python context manager, also +known as `the with statement `_. +Below we use a textual SQL statement to show "Hello World". Textual SQL is +created with a construct called :func:`_sql.text` which we'll discuss in more detail later: .. sourcecode:: pycon+sql @@ -71,21 +69,21 @@ in more detail later: {stop}[('hello world',)] {opensql}ROLLBACK{stop} -In the above example, the context manager provided for a database connection -and also framed the operation inside of a transaction. The default behavior of -the Python DBAPI includes that a transaction is always in progress; when the -scope of the connection is :term:`released`, a ROLLBACK is emitted to end the -transaction. 
The transaction is **not committed automatically**; when we want -to commit data we normally need to call :meth:`_future.Connection.commit` +In the example above, the context manager creates a database connection +and executes the operation in a transaction. The default behavior of +the Python DBAPI is that a transaction is always in progress; when the +connection is :term:`released`, a ROLLBACK is emitted to end the +transaction. The transaction is **not committed automatically**; if we want +to commit data we need to call :meth:`_future.Connection.commit` as we'll see in the next section. .. tip:: "autocommit" mode is available for special cases. The section :ref:`dbapi_autocommit` discusses this. -The result of our SELECT was also returned in an object called -:class:`_engine.Result` that will be discussed later, however for the moment -we'll add that it's best to ensure this object is consumed within the -"connect" block, and is not passed along outside of the scope of our connection. +The result of our SELECT was returned in an object called +:class:`_engine.Result` that will be discussed later. For the moment +we'll add that it's best to use this object within the "connect" block, +and to not use it outside of the scope of our connection. .. rst-class:: core-header @@ -94,11 +92,11 @@ we'll add that it's best to ensure this object is consumed within the Committing Changes ------------------ -We just learned that the DBAPI connection is non-autocommitting. What if -we want to commit some data? We can alter our above example to create a -table and insert some data, and the transaction is then committed using -the :meth:`_future.Connection.commit` method, invoked **inside** the block -where we acquired the :class:`_future.Connection` object: +We just learned that the DBAPI connection doesn't commit automatically. +What if we want to commit some data? 
We can change our example above to create a +table, insert some data and then commit the transaction using +the :meth:`_future.Connection.commit` method, **inside** the block +where we have the :class:`_future.Connection` object: .. sourcecode:: pycon+sql @@ -119,25 +117,23 @@ where we acquired the :class:`_future.Connection` object: COMMIT -Above, we emitted two SQL statements that are generally transactional, a -"CREATE TABLE" statement [1]_ and an "INSERT" statement that's parameterized -(the parameterization syntax above is discussed a few sections below in -:ref:`tutorial_multiple_parameters`). As we want the work we've done to be -committed within our block, we invoke the +Above, we execute two SQL statements, a "CREATE TABLE" statement [1]_ +and an "INSERT" statement that's parameterized (we discuss the parameterization syntax +later in :ref:`tutorial_multiple_parameters`). +To commit the work we've done in our block, we call the :meth:`_future.Connection.commit` method which commits the transaction. After -we call this method inside the block, we can continue to run more SQL -statements and if we choose we may call :meth:`_future.Connection.commit` -again for subsequent statements. SQLAlchemy refers to this style as **commit as +this, we can continue to run more SQL statements and call :meth:`_future.Connection.commit` +again for those statements. SQLAlchemy refers to this style as **commit as you go**. -There is also another style of committing data, which is that we can declare -our "connect" block to be a transaction block up front. For this mode of -operation, we use the :meth:`_future.Engine.begin` method to acquire the -connection, rather than the :meth:`_future.Engine.connect` method. This method -will both manage the scope of the :class:`_future.Connection` and also -enclose everything inside of a transaction with COMMIT at the end, assuming -a successful block, or ROLLBACK in case of exception raise. 
This style -may be referred towards as **begin once**: +There's also another style to commit data. We can declare +our "connect" block to be a transaction block up front. To do this, we use the +:meth:`_future.Engine.begin` method to get the connection, rather than the +:meth:`_future.Engine.connect` method. This method +will manage the scope of the :class:`_future.Connection` and also +enclose everything inside of a transaction with either a COMMIT at the end +if the block was successful, or a ROLLBACK if an exception was raised. This style +is known as **begin once**: .. sourcecode:: pycon+sql @@ -153,9 +149,9 @@ may be referred towards as **begin once**: COMMIT -"Begin once" style is often preferred as it is more succinct and indicates the -intention of the entire block up front. However, within this tutorial we will -normally use "commit as you go" style as it is more flexible for demonstration +You should mostly prefer the "begin once" style because it's shorter and shows the +intention of the entire block up front. However, in this tutorial we'll +use "commit as you go" style as it's more flexible for demonstration purposes. .. topic:: What's "BEGIN (implicit)"? @@ -169,8 +165,8 @@ purposes. .. [1] :term:`DDL` refers to the subset of SQL that instructs the database to create, modify, or remove schema-level constructs such as tables. DDL - such as "CREATE TABLE" is recommended to be within a transaction block that - ends with COMMIT, as many databases uses transactional DDL such that the + such as "CREATE TABLE" should be in a transaction block that + ends with COMMIT, as many databases use transactional DDL such that the schema changes don't take place until the transaction is committed. 
However, as we'll see later, we usually let SQLAlchemy run DDL sequences for us as part of a higher level operation where we don't generally need to worry From cc240eeb4c5755ceb587c60f3125a4864c29fecc Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 2 Jul 2024 13:57:47 -0400 Subject: [PATCH 602/632] call iter() on detached/transient dynamic session Fixed regression going back to 1.4 where accessing a collection using the "dynamic" strategy on a transient object and attempting to query would raise an internal error rather than the expected :class:`.NoResultFound` that occurred in 1.3. Fixes: #11562 Change-Id: I650305963a17592413520d8d1049c601761a0acc (cherry picked from commit 4208993938302e34a67e57af710be7d98ff37659) (cherry picked from commit 1ad8edb9e168bf2f7de88114f7bb6e25c8155b69) --- doc/build/changelog/unreleased_14/11562.rst | 8 +++++++ lib/sqlalchemy/orm/dynamic.py | 10 +++++---- setup.cfg | 2 +- test/orm/test_dynamic.py | 25 +++++++++++++++++++++ 4 files changed, 40 insertions(+), 5 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/11562.rst diff --git a/doc/build/changelog/unreleased_14/11562.rst b/doc/build/changelog/unreleased_14/11562.rst new file mode 100644 index 00000000000..15ccd0df6d2 --- /dev/null +++ b/doc/build/changelog/unreleased_14/11562.rst @@ -0,0 +1,8 @@ +.. change:: + :tags: bug, orm, regression + :tickets: 11562 + + Fixed regression going back to 1.4 where accessing a collection using the + "dynamic" strategy on a transient object and attempting to query would + raise an internal error rather than the expected :class:`.NoResultFound` + that occurred in 1.3. 
diff --git a/lib/sqlalchemy/orm/dynamic.py b/lib/sqlalchemy/orm/dynamic.py index 5d5ce3642cb..0a0d17c08d8 100644 --- a/lib/sqlalchemy/orm/dynamic.py +++ b/lib/sqlalchemy/orm/dynamic.py @@ -344,10 +344,12 @@ def _iter(self): return result.IteratorResult( result.SimpleResultMetaData([self.attr.class_.__name__]), - self.attr._get_collection_history( - attributes.instance_state(self.instance), - attributes.PASSIVE_NO_INITIALIZE, - ).added_items, + iter( + self.attr._get_collection_history( + attributes.instance_state(self.instance), + attributes.PASSIVE_NO_INITIALIZE, + ).added_items + ), _source_supports_scalars=True, ).scalars() else: diff --git a/setup.cfg b/setup.cfg index e4cee11058d..3f8003a1ed3 100644 --- a/setup.cfg +++ b/setup.cfg @@ -142,7 +142,7 @@ ignore = N801,N802,N806, RST304,RST303,RST299,RST399, W503,W504 - U100 + U100,U101 IS001 exclude = .venv,.git,.tox,dist,doc,*egg,build import-order-style = google diff --git a/test/orm/test_dynamic.py b/test/orm/test_dynamic.py index 0cb4d76d9c2..3997aa9710c 100644 --- a/test/orm/test_dynamic.py +++ b/test/orm/test_dynamic.py @@ -238,6 +238,31 @@ def my_filter(self, arg): use_default_dialect=True, ) + @testing.combinations( + ("all", []), + ("one", exc.NoResultFound), + ("one_or_none", None), + argnames="method, expected", + ) + @testing.variation("add_to_session", [True, False]) + def test_transient_raise(self, method, expected, add_to_session): + """test 11562""" + User, Address = self._user_address_fixture() + + u1 = User(name="u1") + if add_to_session: + sess = fixture_session() + sess.add(u1) + + meth = getattr(u1.addresses, method) + if expected is exc.NoResultFound: + with expect_raises_message( + exc.NoResultFound, "No row was found when one was required" + ): + meth() + else: + eq_(meth(), expected) + def test_detached_raise(self): """so filtering on a detached dynamic list raises an error...""" From 024a0a48c65f4acbbabf85b896470a5868ec48d3 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 6 Jul 2024 
12:24:51 -0400 Subject: [PATCH 603/632] ensure intro to "functions have types" is not misleading make sure it's clear that there is normally not a return type for SQL functions Fixes: #11578 Change-Id: Ia0b66e7fe685dad427822345dd232eb47a0fc44f (cherry picked from commit e9d3e49601d011f9a3471921729728ca688e04b9) (cherry picked from commit 5e495e16da87644bcb07aa76c9021d486053b81d) --- doc/build/tutorial/data_select.rst | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/doc/build/tutorial/data_select.rst b/doc/build/tutorial/data_select.rst index b34ab648cd3..a47b0ca4edc 100644 --- a/doc/build/tutorial/data_select.rst +++ b/doc/build/tutorial/data_select.rst @@ -1396,11 +1396,18 @@ as opposed to the "return type" of a Python function. The SQL return type of any SQL function may be accessed, typically for debugging purposes, by referring to the :attr:`_functions.Function.type` -attribute:: +attribute; this will be pre-configured for a **select few** of extremely +common SQL functions, but for most SQL functions is the "null" datatype +if not otherwise specified:: + >>> # pre-configured SQL function (only a few dozen of these) >>> func.now().type DateTime() + >>> # arbitrary SQL function (all other SQL functions) + >>> func.run_some_calculation().type + NullType() + These SQL return types are significant when making use of the function expression in the context of a larger expression; that is, math operators will work better when the datatype of the expression is From fdfb3a2842c3084e791bfb5d6e2e4369b8f7d8f1 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Sun, 7 Jul 2024 11:56:56 +0200 Subject: [PATCH 604/632] Improve generated reflection in sqlite Fixed reflection of computed column in SQLite to properly account for complex expressions. 
Fixes: #11582 Change-Id: I8e9fdda3e47c04b376973ee245b3175374a08f56 (cherry picked from commit e67a0b77a82667e2199e333bae0606d143fa228e) --- doc/build/changelog/unreleased_14/11582.rst | 6 ++ lib/sqlalchemy/dialects/sqlite/base.py | 13 ++- test/dialect/test_sqlite.py | 107 ++++++++++++++++++-- 3 files changed, 119 insertions(+), 7 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/11582.rst diff --git a/doc/build/changelog/unreleased_14/11582.rst b/doc/build/changelog/unreleased_14/11582.rst new file mode 100644 index 00000000000..935af9b2444 --- /dev/null +++ b/doc/build/changelog/unreleased_14/11582.rst @@ -0,0 +1,6 @@ +.. change:: + :tags: bug, reflection, sqlite + :tickets: 11582 + + Fixed reflection of computed column in SQLite to properly account + for complex expressions. diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index bcf38edc729..c171136ac2b 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -2100,6 +2100,14 @@ def get_columns(self, connection, table_name, schema=None, **kw): tablesql = self._get_table_sql( connection, table_name, schema, **kw ) + # remove create table + match = re.match( + r"create table .*?\((.*)\)$", + tablesql.strip(), + re.DOTALL | re.IGNORECASE, + ) + assert match, "create table not found in %s" % tablesql + tablesql = match.group(1).strip() columns.append( self._get_column_info( @@ -2149,7 +2157,10 @@ def _get_column_info( if generated: sqltext = "" if tablesql: - pattern = r"[^,]*\s+AS\s+\(([^,]*)\)\s*(?:virtual|stored)?" + pattern = ( + r"[^,]*\s+GENERATED\s+ALWAYS\s+AS" + r"\s+\((.*)\)\s*(?:virtual|stored)?" 
+ ) match = re.search( re.escape(name) + pattern, tablesql, re.IGNORECASE ) diff --git a/test/dialect/test_sqlite.py b/test/dialect/test_sqlite.py index 418bf9c6575..12e607020e0 100644 --- a/test/dialect/test_sqlite.py +++ b/test/dialect/test_sqlite.py @@ -54,6 +54,7 @@ from sqlalchemy.testing import expect_warnings from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ +from sqlalchemy.testing import is_true from sqlalchemy.testing import mock from sqlalchemy.testing.assertions import expect_raises_message from sqlalchemy.types import Boolean @@ -114,7 +115,7 @@ def test_string_dates_passed_raise(self, connection): ) def test_cant_parse_datetime_message(self, connection): - for (typ, disp) in [ + for typ, disp in [ (Time, "time"), (DateTime, "datetime"), (Date, "date"), @@ -992,7 +993,6 @@ def test_col_targeting_union(self): class SQLTest(fixtures.TestBase, AssertsCompiledSQL): - """Tests SQLite-dialect specific compilation.""" __dialect__ = sqlite.dialect() @@ -1387,7 +1387,6 @@ def test_on_conflict_clause_primary_key_constraint(self): class InsertTest(fixtures.TestBase, AssertsExecutionResults): - """Tests inserts and autoincrement.""" __only_on__ = "sqlite" @@ -2385,8 +2384,8 @@ def test_create_index_with_schema(self): [ { "unique": 0, - "name": u"ix_main_l_bar", - "column_names": [u"bar"], + "name": "ix_main_l_bar", + "column_names": ["bar"], "dialect_options": {}, } ], @@ -2586,7 +2585,6 @@ def test_constraint_cols( class SavepointTest(fixtures.TablesTest): - """test that savepoints work when we use the correct event setup""" __only_on__ = "sqlite" @@ -3544,3 +3542,100 @@ def test_on_conflict_do_update_special_types_in_set(self, connection): conn.scalar(sql.select(bind_targets.c.data)), "new updated data processed", ) + + +class ComputedReflectionTest(fixtures.TestBase): + __only_on__ = "sqlite" + __backend__ = True + + @classmethod + def setup_test_class(cls): + tables = [ + """CREATE TABLE test1 ( + s VARCHAR, + x VARCHAR GENERATED 
ALWAYS AS (s || 'x') + );""", + """CREATE TABLE test2 ( + s VARCHAR, + x VARCHAR GENERATED ALWAYS AS (s || 'x'), + y VARCHAR GENERATED ALWAYS AS (s || 'y') + );""", + """CREATE TABLE test3 ( + s VARCHAR, + x INTEGER GENERATED ALWAYS AS (INSTR(s, ",")) + );""", + """CREATE TABLE test4 ( + s VARCHAR, + x INTEGER GENERATED ALWAYS AS (INSTR(s, ",")), + y INTEGER GENERATED ALWAYS AS (INSTR(x, ",")));""", + """CREATE TABLE test5 ( + s VARCHAR, + x VARCHAR GENERATED ALWAYS AS (s || 'x') STORED + );""", + """CREATE TABLE test6 ( + s VARCHAR, + x VARCHAR GENERATED ALWAYS AS (s || 'x') STORED, + y VARCHAR GENERATED ALWAYS AS (s || 'y') STORED + );""", + """CREATE TABLE test7 ( + s VARCHAR, + x INTEGER GENERATED ALWAYS AS (INSTR(s, ",")) STORED + );""", + """CREATE TABLE test8 ( + s VARCHAR, + x INTEGER GENERATED ALWAYS AS (INSTR(s, ",")) STORED, + y INTEGER GENERATED ALWAYS AS (INSTR(x, ",")) STORED + );""", + ] + + with testing.db.begin() as conn: + for ct in tables: + conn.exec_driver_sql(ct) + + @classmethod + def teardown_test_class(cls): + with testing.db.begin() as conn: + for tn in cls.res: + conn.exec_driver_sql("DROP TABLE %s" % tn) + + res = { + "test1": {"x": {"text": "s || 'x'", "stored": False}}, + "test2": { + "x": {"text": "s || 'x'", "stored": False}, + "y": {"text": "s || 'y'", "stored": False}, + }, + "test3": {"x": {"text": 'INSTR(s, ",")', "stored": False}}, + "test4": { + "x": {"text": 'INSTR(s, ",")', "stored": False}, + "y": {"text": 'INSTR(x, ",")', "stored": False}, + }, + "test5": {"x": {"text": "s || 'x'", "stored": True}}, + "test6": { + "x": {"text": "s || 'x'", "stored": True}, + "y": {"text": "s || 'y'", "stored": True}, + }, + "test7": {"x": {"text": 'INSTR(s, ",")', "stored": True}}, + "test8": { + "x": {"text": 'INSTR(s, ",")', "stored": True}, + "y": {"text": 'INSTR(x, ",")', "stored": True}, + }, + } + + def test_reflection(self, connection): + meta = MetaData() + meta.reflect(connection) + eq_(len(meta.tables), len(self.res)) + for tbl in 
meta.tables.values(): + data = self.res[tbl.name] + seen = set() + for col in tbl.c: + if col.name not in data: + is_(col.computed, None) + else: + info = data[col.name] + seen.add(col.name) + msg = "%s-%s" % (tbl.name, col.name) + is_true(bool(col.computed)) + eq_(col.computed.sqltext.text, info["text"], msg) + eq_(col.computed.persisted, info["stored"], msg) + eq_(seen, set(data.keys())) From 9fe159b2d8973d800fbef26ac6f375f0a91d5398 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 15 Jul 2024 09:15:36 -0400 Subject: [PATCH 605/632] remove redundant(?) Isolation / isolation References: https://github.com/sqlalchemy/sqlalchemy/discussions/11609 Change-Id: I8ada4b8ed64a6d6b9cb923503fda6d7b4888f429 (cherry picked from commit e44e805506fa71318e23a2bfad733fbbf5a9ee59) --- doc/build/glossary.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/build/glossary.rst b/doc/build/glossary.rst index b7d5476e46c..51d98f4655f 100644 --- a/doc/build/glossary.rst +++ b/doc/build/glossary.rst @@ -908,7 +908,6 @@ Glossary isolation isolated - Isolation isolation level The isolation property of the :term:`ACID` model ensures that the concurrent execution From 94b174d2216e008ee3da4ee78e001eae9ee71186 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 19 Jul 2024 23:05:43 -0400 Subject: [PATCH 606/632] add missing changelog for #11544 Fixes: #11544 Change-Id: Ibf57f6ee0fee105672b03c2bf6690cad6bb0932d (cherry picked from commit 800932af467109f06c0196c42ae86272a5d7f96a) --- doc/build/changelog/unreleased_14/11544.rst | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 doc/build/changelog/unreleased_14/11544.rst diff --git a/doc/build/changelog/unreleased_14/11544.rst b/doc/build/changelog/unreleased_14/11544.rst new file mode 100644 index 00000000000..82639e54e84 --- /dev/null +++ b/doc/build/changelog/unreleased_14/11544.rst @@ -0,0 +1,9 @@ +.. 
change:: + :tags: bug, sql + :tickets: 11544 + + Fixed caching issue where the + :paramref:`_sql.Select.with_for_update.key_share` element of + :meth:`_sql.Select.with_for_update` was not considered as part of the cache + key, leading to incorrect caching if different variations of this parameter + were used with an otherwise identical statement. From 7b52230d43d7713bb4ee54174507c1e96990236f Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Mon, 22 Jul 2024 23:17:45 +0200 Subject: [PATCH 607/632] update typing for mypy 1.11; pin plugin to <1.11 (1.4 specific notes) The legacy mypy plugin is no longer fully functional with the latest series of mypy 1.11.0, as changes in the mypy interpreter are no longer compatible with the approach used by the plugin. If code is dependent on the legacy mypy plugin with sqlalchemy2-stubs, it's recommended to pin mypy to be below the 1.11.0 series. Seek upgrading to the 2.0 series of SQLAlchemy and migrating to the modern type annotations. Change-Id: Ib8fef93ede588430dc0f7ed44ef887649a415821 cherry picked from commit 0741fe45f62f89845b15d7faea209b26a652a174) Change-Id: I61f50539cda851b98178060410fedaa70971d01a --- doc/build/changelog/unreleased_14/mypy1110.rst | 14 ++++++++++++++ doc/build/orm/extensions/mypy.rst | 12 ++++++++---- tox.ini | 2 +- 3 files changed, 23 insertions(+), 5 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/mypy1110.rst diff --git a/doc/build/changelog/unreleased_14/mypy1110.rst b/doc/build/changelog/unreleased_14/mypy1110.rst new file mode 100644 index 00000000000..1dc5e0dc3ec --- /dev/null +++ b/doc/build/changelog/unreleased_14/mypy1110.rst @@ -0,0 +1,14 @@ +.. change:: + :tags: bug, mypy + :versions: 2.0 + + The deprecated mypy plugin is no longer fully functional with the latest + series of mypy 1.11.0, as changes in the mypy interpreter are no longer + compatible with the approach used by the plugin. 
If code is dependent on + the mypy plugin with sqlalchemy2-stubs, it's recommended to pin mypy to be + below the 1.11.0 series. Seek upgrading to the 2.0 series of SQLAlchemy + and migrating to the modern type annotations. + + .. seealso:: + + :ref:`mypy_toplevel` diff --git a/doc/build/orm/extensions/mypy.rst b/doc/build/orm/extensions/mypy.rst index 6c94ae5f712..0b7e332c533 100644 --- a/doc/build/orm/extensions/mypy.rst +++ b/doc/build/orm/extensions/mypy.rst @@ -10,7 +10,8 @@ MyPy_ type checking tool. **The SQLAlchemy Mypy Plugin is DEPRECATED, and will be removed possibly as early as the SQLAlchemy 2.1 release. We would urge users to please - migrate away from it ASAP.** + migrate away from it ASAP. The mypy plugin also works only up until + mypy version 1.10.1. version 1.11.0 and greater may not work properly.** This plugin cannot be maintained across constantly changing releases of mypy and its stability going forward CANNOT be guaranteed. @@ -21,7 +22,11 @@ MyPy_ type checking tool. .. topic:: SQLAlchemy Mypy Plugin Status Update - **Updated July 2023** + **Updated July 2024** + + The mypy plugin is supported **only up until mypy 1.10.1, and it will have + issues running with 1.11.0 or greater**. Use with mypy 1.11.0 or greater + may have error conditions which currently cannot be resolved. For SQLAlchemy 2.0, the Mypy plugin continues to work at the level at which it reached in the SQLAlchemy 1.4 release. SQLAlchemy 2.0 however features @@ -168,8 +173,7 @@ following:: ) name: Mapped[Optional[str]] = Mapped._special_method(Column(String)) - def __init__(self, id: Optional[int] = ..., name: Optional[str] = ...) -> None: - ... + def __init__(self, id: Optional[int] = ..., name: Optional[str] = ...) -> None: ... 
some_user = User(id=5, name="user") diff --git a/tox.ini b/tox.ini index 5372dbb34a7..d8511d3bdad 100644 --- a/tox.ini +++ b/tox.ini @@ -181,7 +181,7 @@ deps= greenlet != 0.4.17 mock; python_version < '3.3' importlib_metadata; python_version < '3.8' - mypy + mypy >= 1.2.0,<1.11 patch==1.* git+https://github.com/sqlalchemy/sqlalchemy2-stubs commands = From 9f241dc0bdc2a0c82955dc95aefc4f8e0d742b8f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 29 Jul 2024 11:58:38 -0400 Subject: [PATCH 608/632] add missing forwards port versions I totally forgot these meaning they wont show up in 2.0 changelogs, so this is a big mistake i have to stop making Change-Id: I5f998eecbfa8aceab3ee247bb3a00e13820af872 (cherry picked from commit 1ba11863398153760952261adff08d544a508c3a) --- doc/build/changelog/unreleased_14/11417.rst | 1 + doc/build/changelog/unreleased_14/11471.rst | 1 + doc/build/changelog/unreleased_14/11514.rst | 1 + doc/build/changelog/unreleased_14/11544.rst | 1 + doc/build/changelog/unreleased_14/11562.rst | 1 + doc/build/changelog/unreleased_14/11582.rst | 1 + doc/build/changelog/unreleased_14/greenlet_compat.rst | 1 + doc/build/changelog/unreleased_14/mypy1110.rst | 2 +- 8 files changed, 8 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/unreleased_14/11417.rst b/doc/build/changelog/unreleased_14/11417.rst index 5182c03ea86..b37af43e3d3 100644 --- a/doc/build/changelog/unreleased_14/11417.rst +++ b/doc/build/changelog/unreleased_14/11417.rst @@ -1,6 +1,7 @@ .. change:: :tags: bug, general :tickets: 11417 + :versions: 2.0.31 Set up full Python 3.13 support to the extent currently possible, repairing issues within internal language helpers as well as the serializer extension diff --git a/doc/build/changelog/unreleased_14/11471.rst b/doc/build/changelog/unreleased_14/11471.rst index f669eabc789..47fda837575 100644 --- a/doc/build/changelog/unreleased_14/11471.rst +++ b/doc/build/changelog/unreleased_14/11471.rst @@ -1,6 +1,7 @@ .. 
change:: :tags: bug, sql :tickets: 11471 + :versions: 2.0.31 Fixed caching issue where using the :meth:`.TextualSelect.add_cte` method of the :class:`.TextualSelect` construct would not set a correct cache key diff --git a/doc/build/changelog/unreleased_14/11514.rst b/doc/build/changelog/unreleased_14/11514.rst index 81f0ddeddc0..145f87f4384 100644 --- a/doc/build/changelog/unreleased_14/11514.rst +++ b/doc/build/changelog/unreleased_14/11514.rst @@ -1,6 +1,7 @@ .. change:: :tags: bug, mssql :tickets: 11514 + :versions: 2.0.32 Fixed issue where SQL Server drivers don't support bound parameters when rendering the "frame specification" for a window function, e.g. "ROWS diff --git a/doc/build/changelog/unreleased_14/11544.rst b/doc/build/changelog/unreleased_14/11544.rst index 82639e54e84..6bc3b9705f4 100644 --- a/doc/build/changelog/unreleased_14/11544.rst +++ b/doc/build/changelog/unreleased_14/11544.rst @@ -1,6 +1,7 @@ .. change:: :tags: bug, sql :tickets: 11544 + :versions: 2.0 Fixed caching issue where the :paramref:`_sql.Select.with_for_update.key_share` element of diff --git a/doc/build/changelog/unreleased_14/11562.rst b/doc/build/changelog/unreleased_14/11562.rst index 15ccd0df6d2..beaad363351 100644 --- a/doc/build/changelog/unreleased_14/11562.rst +++ b/doc/build/changelog/unreleased_14/11562.rst @@ -1,6 +1,7 @@ .. change:: :tags: bug, orm, regression :tickets: 11562 + :versions: 2.0.32 Fixed regression going back to 1.4 where accessing a collection using the "dynamic" strategy on a transient object and attempting to query would diff --git a/doc/build/changelog/unreleased_14/11582.rst b/doc/build/changelog/unreleased_14/11582.rst index 935af9b2444..6a2009cbae4 100644 --- a/doc/build/changelog/unreleased_14/11582.rst +++ b/doc/build/changelog/unreleased_14/11582.rst @@ -1,6 +1,7 @@ .. change:: :tags: bug, reflection, sqlite :tickets: 11582 + :versions: 2.0.32 Fixed reflection of computed column in SQLite to properly account for complex expressions. 
diff --git a/doc/build/changelog/unreleased_14/greenlet_compat.rst b/doc/build/changelog/unreleased_14/greenlet_compat.rst index d9eb51cd9c0..95ce98113df 100644 --- a/doc/build/changelog/unreleased_14/greenlet_compat.rst +++ b/doc/build/changelog/unreleased_14/greenlet_compat.rst @@ -1,5 +1,6 @@ .. change:: :tags: usecase, engine + :versions: 2.0.31 Modified the internal representation used for adapting asyncio calls to greenlets to allow for duck-typed compatibility with third party libraries diff --git a/doc/build/changelog/unreleased_14/mypy1110.rst b/doc/build/changelog/unreleased_14/mypy1110.rst index 1dc5e0dc3ec..3f1fe05ce2d 100644 --- a/doc/build/changelog/unreleased_14/mypy1110.rst +++ b/doc/build/changelog/unreleased_14/mypy1110.rst @@ -1,6 +1,6 @@ .. change:: :tags: bug, mypy - :versions: 2.0 + :versions: 2.0.32 The deprecated mypy plugin is no longer fully functional with the latest series of mypy 1.11.0, as changes in the mypy interpreter are no longer From 257a65d755a0dac4513bb6a35d279b05ceb7e079 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 29 Jul 2024 12:18:22 -0400 Subject: [PATCH 609/632] - 1.4.53 --- doc/build/changelog/changelog_14.rst | 97 ++++++++++++++++++- doc/build/changelog/unreleased_14/11417.rst | 12 --- doc/build/changelog/unreleased_14/11471.rst | 8 -- doc/build/changelog/unreleased_14/11499.rst | 6 -- doc/build/changelog/unreleased_14/11514.rst | 9 -- doc/build/changelog/unreleased_14/11544.rst | 10 -- doc/build/changelog/unreleased_14/11562.rst | 9 -- doc/build/changelog/unreleased_14/11582.rst | 7 -- .../unreleased_14/greenlet_compat.rst | 11 --- .../changelog/unreleased_14/mypy1110.rst | 14 --- doc/build/conf.py | 4 +- 11 files changed, 98 insertions(+), 89 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/11417.rst delete mode 100644 doc/build/changelog/unreleased_14/11471.rst delete mode 100644 doc/build/changelog/unreleased_14/11499.rst delete mode 100644 doc/build/changelog/unreleased_14/11514.rst delete 
mode 100644 doc/build/changelog/unreleased_14/11544.rst delete mode 100644 doc/build/changelog/unreleased_14/11562.rst delete mode 100644 doc/build/changelog/unreleased_14/11582.rst delete mode 100644 doc/build/changelog/unreleased_14/greenlet_compat.rst delete mode 100644 doc/build/changelog/unreleased_14/mypy1110.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 01572e55c83..8532d76967e 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,102 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.53 - :include_notes_from: unreleased_14 + :released: July 29, 2024 + + .. change:: + :tags: bug, general + :tickets: 11417 + :versions: 2.0.31 + + Set up full Python 3.13 support to the extent currently possible, repairing + issues within internal language helpers as well as the serializer extension + module. + + For version 1.4, this also modernizes the "extras" names in setup.cfg + to use dashes and not underscores for two-word names. Underscore names + are still present to accommodate potential compatibility issues. + + .. change:: + :tags: bug, sql + :tickets: 11471 + :versions: 2.0.31 + + Fixed caching issue where using the :meth:`.TextualSelect.add_cte` method + of the :class:`.TextualSelect` construct would not set a correct cache key + which distinguished between different CTE expressions. + + .. change:: + :tags: bug, engine + :tickets: 11499 + + Adjustments to the C extensions, which are specific to the SQLAlchemy 1.x + series, to work under Python 3.13. Pull request courtesy Ben Beasley. + + .. change:: + :tags: bug, mssql + :tickets: 11514 + :versions: 2.0.32 + + Fixed issue where SQL Server drivers don't support bound parameters when + rendering the "frame specification" for a window function, e.g. "ROWS + BETWEEN", etc. + + + .. 
change:: + :tags: bug, sql + :tickets: 11544 + :versions: 2.0 + + Fixed caching issue where the + :paramref:`_sql.Select.with_for_update.key_share` element of + :meth:`_sql.Select.with_for_update` was not considered as part of the cache + key, leading to incorrect caching if different variations of this parameter + were used with an otherwise identical statement. + + .. change:: + :tags: bug, orm, regression + :tickets: 11562 + :versions: 2.0.32 + + Fixed regression going back to 1.4 where accessing a collection using the + "dynamic" strategy on a transient object and attempting to query would + raise an internal error rather than the expected :class:`.NoResultFound` + that occurred in 1.3. + + .. change:: + :tags: bug, reflection, sqlite + :tickets: 11582 + :versions: 2.0.32 + + Fixed reflection of computed column in SQLite to properly account + for complex expressions. + + .. change:: + :tags: usecase, engine + :versions: 2.0.31 + + Modified the internal representation used for adapting asyncio calls to + greenlets to allow for duck-typed compatibility with third party libraries + that implement SQLAlchemy's "greenlet-to-asyncio" pattern directly. + Running code within a greenlet that features the attribute + ``__sqlalchemy_greenlet_provider__ = True`` will allow calls to + :func:`sqlalchemy.util.await_only` directly. + + + .. change:: + :tags: bug, mypy + :versions: 2.0.32 + + The deprecated mypy plugin is no longer fully functional with the latest + series of mypy 1.11.0, as changes in the mypy interpreter are no longer + compatible with the approach used by the plugin. If code is dependent on + the mypy plugin with sqlalchemy2-stubs, it's recommended to pin mypy to be + below the 1.11.0 series. Seek upgrading to the 2.0 series of SQLAlchemy + and migrating to the modern type annotations. + + .. seealso:: + + :ref:`mypy_toplevel` .. 
changelog:: :version: 1.4.52 diff --git a/doc/build/changelog/unreleased_14/11417.rst b/doc/build/changelog/unreleased_14/11417.rst deleted file mode 100644 index b37af43e3d3..00000000000 --- a/doc/build/changelog/unreleased_14/11417.rst +++ /dev/null @@ -1,12 +0,0 @@ -.. change:: - :tags: bug, general - :tickets: 11417 - :versions: 2.0.31 - - Set up full Python 3.13 support to the extent currently possible, repairing - issues within internal language helpers as well as the serializer extension - module. - - For version 1.4, this also modernizes the "extras" names in setup.cfg - to use dashes and not underscores for two-word names. Underscore names - are still present to accommodate potential compatibility issues. diff --git a/doc/build/changelog/unreleased_14/11471.rst b/doc/build/changelog/unreleased_14/11471.rst deleted file mode 100644 index 47fda837575..00000000000 --- a/doc/build/changelog/unreleased_14/11471.rst +++ /dev/null @@ -1,8 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 11471 - :versions: 2.0.31 - - Fixed caching issue where using the :meth:`.TextualSelect.add_cte` method - of the :class:`.TextualSelect` construct would not set a correct cache key - which distinguished between different CTE expressions. diff --git a/doc/build/changelog/unreleased_14/11499.rst b/doc/build/changelog/unreleased_14/11499.rst deleted file mode 100644 index e03062c1911..00000000000 --- a/doc/build/changelog/unreleased_14/11499.rst +++ /dev/null @@ -1,6 +0,0 @@ -.. change:: - :tags: bug, engine - :tickets: 11499 - - Adjustments to the C extensions, which are specific to the SQLAlchemy 1.x - series, to work under Python 3.13. Pull request courtesy Ben Beasley. diff --git a/doc/build/changelog/unreleased_14/11514.rst b/doc/build/changelog/unreleased_14/11514.rst deleted file mode 100644 index 145f87f4384..00000000000 --- a/doc/build/changelog/unreleased_14/11514.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. 
change:: - :tags: bug, mssql - :tickets: 11514 - :versions: 2.0.32 - - Fixed issue where SQL Server drivers don't support bound parameters when - rendering the "frame specification" for a window function, e.g. "ROWS - BETWEEN", etc. - diff --git a/doc/build/changelog/unreleased_14/11544.rst b/doc/build/changelog/unreleased_14/11544.rst deleted file mode 100644 index 6bc3b9705f4..00000000000 --- a/doc/build/changelog/unreleased_14/11544.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. change:: - :tags: bug, sql - :tickets: 11544 - :versions: 2.0 - - Fixed caching issue where the - :paramref:`_sql.Select.with_for_update.key_share` element of - :meth:`_sql.Select.with_for_update` was not considered as part of the cache - key, leading to incorrect caching if different variations of this parameter - were used with an otherwise identical statement. diff --git a/doc/build/changelog/unreleased_14/11562.rst b/doc/build/changelog/unreleased_14/11562.rst deleted file mode 100644 index beaad363351..00000000000 --- a/doc/build/changelog/unreleased_14/11562.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, orm, regression - :tickets: 11562 - :versions: 2.0.32 - - Fixed regression going back to 1.4 where accessing a collection using the - "dynamic" strategy on a transient object and attempting to query would - raise an internal error rather than the expected :class:`.NoResultFound` - that occurred in 1.3. diff --git a/doc/build/changelog/unreleased_14/11582.rst b/doc/build/changelog/unreleased_14/11582.rst deleted file mode 100644 index 6a2009cbae4..00000000000 --- a/doc/build/changelog/unreleased_14/11582.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: bug, reflection, sqlite - :tickets: 11582 - :versions: 2.0.32 - - Fixed reflection of computed column in SQLite to properly account - for complex expressions. 
diff --git a/doc/build/changelog/unreleased_14/greenlet_compat.rst b/doc/build/changelog/unreleased_14/greenlet_compat.rst deleted file mode 100644 index 95ce98113df..00000000000 --- a/doc/build/changelog/unreleased_14/greenlet_compat.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. change:: - :tags: usecase, engine - :versions: 2.0.31 - - Modified the internal representation used for adapting asyncio calls to - greenlets to allow for duck-typed compatibility with third party libraries - that implement SQLAlchemy's "greenlet-to-asyncio" pattern directly. - Running code within a greenlet that features the attribute - ``__sqlalchemy_greenlet_provider__ = True`` will allow calls to - :func:`sqlalchemy.util.await_only` directly. - diff --git a/doc/build/changelog/unreleased_14/mypy1110.rst b/doc/build/changelog/unreleased_14/mypy1110.rst deleted file mode 100644 index 3f1fe05ce2d..00000000000 --- a/doc/build/changelog/unreleased_14/mypy1110.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. change:: - :tags: bug, mypy - :versions: 2.0.32 - - The deprecated mypy plugin is no longer fully functional with the latest - series of mypy 1.11.0, as changes in the mypy interpreter are no longer - compatible with the approach used by the plugin. If code is dependent on - the mypy plugin with sqlalchemy2-stubs, it's recommended to pin mypy to be - below the 1.11.0 series. Seek upgrading to the 2.0 series of SQLAlchemy - and migrating to the modern type annotations. - - .. seealso:: - - :ref:`mypy_toplevel` diff --git a/doc/build/conf.py b/doc/build/conf.py index 508116a5608..4fe12c6a025 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -221,9 +221,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. 
-release = "1.4.52" +release = "1.4.53" -release_date = "March 4, 2024" +release_date = "July 29, 2024" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From 3f38fe72c5d3be915d3f5bfb6d1ee8d6cfaa3f0a Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 29 Jul 2024 12:34:35 -0400 Subject: [PATCH 610/632] Version 1.4.54 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 8532d76967e..c19ef965f88 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.54 + :include_notes_from: unreleased_14 + .. changelog:: :version: 1.4.53 :released: July 29, 2024 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index a1433d2f098..ef3ad61f3cc 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.53" +__version__ = "1.4.54" def __go(lcls): From 9824b3ab5aa1741b2a603b3d6aeed00d18b0c0ef Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Mon, 29 Jul 2024 21:53:47 +0200 Subject: [PATCH 611/632] fix wheel pipeline Change-Id: Ib38c1fc369be77ebb27504a0b9ca0f6778368d0f (cherry picked from commit fbf35e9b71e744079b65a717f00b126707cc9b36) --- .github/workflows/create-wheels.yaml | 25 +++++-------------------- 1 file changed, 5 insertions(+), 20 deletions(-) diff --git a/.github/workflows/create-wheels.yaml b/.github/workflows/create-wheels.yaml index cb6d16a7335..0e8d5b1a102 100644 --- a/.github/workflows/create-wheels.yaml +++ b/.github/workflows/create-wheels.yaml @@ -64,15 +64,12 @@ jobs: (cat setup.cfg) | %{$_ -replace "tag_build.?=.?dev",""} | set-content 
setup.cfg - name: Create wheel - # create the wheel using --no-use-pep517 since locally we have pyproject - # this flag should be removed once sqlalchemy supports pep517 # `--no-deps` is used to only generate the wheel for the current library. Redundant in sqlalchemy since it has no dependencies run: | python -m pip install --upgrade pip pip --version - pip install 'setuptools>=44' 'wheel>=0.34' pip list - pip wheel -w dist --no-use-pep517 -v --no-deps . + pip wheel -w dist -v --no-deps . - name: Install wheel # install the created wheel without using the pypi index @@ -209,11 +206,8 @@ jobs: with: # python-versions is the output of the previous step and is in the form -. Eg cp27-cp27mu python-versions: ${{ matrix.python-version }} - build-requirements: "setuptools>=44 wheel>=0.34" - # Create the wheel using --no-use-pep517 since locally we have pyproject - # This flag should be removed once sqlalchemy supports pep517 # `--no-deps` is used to only generate the wheel for the current library. Redundant in sqlalchemy since it has no dependencies - pip-wheel-args: "-w ./dist --no-use-pep517 -v --no-deps" + pip-wheel-args: "-w ./dist -v --no-deps" - name: Create wheel for manylinux2014 for py3 if: ${{ matrix.python-version != 'cp27-cp27m' && matrix.python-version != 'cp27-cp27mu' }} @@ -225,11 +219,8 @@ jobs: with: # python-versions is the output of the previous step and is in the form -. Eg cp27-cp27mu python-versions: ${{ matrix.python-version }} - build-requirements: "setuptools>=44 wheel>=0.34" - # Create the wheel using --no-use-pep517 since locally we have pyproject - # This flag should be removed once sqlalchemy supports pep517 # `--no-deps` is used to only generate the wheel for the current library. 
Redundant in sqlalchemy since it has no dependencies - pip-wheel-args: "-w ./dist --no-use-pep517 -v --no-deps" + pip-wheel-args: "-w ./dist -v --no-deps" - name: Create wheel for manylinux py2 if: ${{ matrix.python-version == 'cp27-cp27m' || matrix.python-version == 'cp27-cp27mu' }} @@ -241,11 +232,8 @@ jobs: with: # python-versions is the output of the previous step and is in the form -. Eg cp27-cp27mu python-versions: ${{ matrix.python-version }} - build-requirements: "setuptools>=44 wheel>=0.34" - # Create the wheel using --no-use-pep517 since locally we have pyproject - # This flag should be removed once sqlalchemy supports pep517 # `--no-deps` is used to only generate the wheel for the current library. Redundant in sqlalchemy since it has no dependencies - pip-wheel-args: "-w ./dist --no-use-pep517 -v --no-deps" + pip-wheel-args: "-w ./dist -v --no-deps" - name: Set up Python if: ${{ matrix.python-version != 'cp27-cp27m' && matrix.python-version != 'cp27-cp27mu' }} @@ -385,11 +373,8 @@ jobs: with: # python-versions is the output of the previous step and is in the form -. Eg cp37-cp37mu python-versions: ${{ matrix.python-version }} - build-requirements: "setuptools>=44 wheel>=0.34" - # Create the wheel using --no-use-pep517 since locally we have pyproject - # This flag should be removed once sqlalchemy supports pep517 # `--no-deps` is used to only generate the wheel for the current library. Redundant in sqlalchemy since it has no dependencies - pip-wheel-args: "-w ./dist --no-use-pep517 -v --no-deps" + pip-wheel-args: "-w ./dist -v --no-deps" - name: Check created wheel # check that the wheel is compatible with the current installation. 
From b2c94371d8ed85005ca44a26c0f7eb337124760e Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 11 Aug 2024 15:41:36 -0400 Subject: [PATCH 612/632] turn off pyodbc pooling new updates of unixodbc are turning this on in CI revealing that our isolation level tests assume no pooling takes place, so disable this, which is only at global module level for pyodbc Change-Id: I971dfddc90d248281e8ca8677a3a41af6de28b86 (cherry picked from commit 896dbdb5920ffb645a8948c254f73dd0fcb0d3c0) --- lib/sqlalchemy/dialects/mssql/provision.py | 7 +++++++ lib/sqlalchemy/testing/provision.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/lib/sqlalchemy/dialects/mssql/provision.py b/lib/sqlalchemy/dialects/mssql/provision.py index dd001da2467..20c39f0d11f 100644 --- a/lib/sqlalchemy/dialects/mssql/provision.py +++ b/lib/sqlalchemy/dialects/mssql/provision.py @@ -18,10 +18,17 @@ from ...testing.provision import drop_db from ...testing.provision import get_temp_table_name from ...testing.provision import log +from ...testing.provision import post_configure_engine from ...testing.provision import run_reap_dbs from ...testing.provision import temp_table_keyword_args +@post_configure_engine.for_db("mssql") +def post_configure_engine(url, engine, follower_ident): + if engine.driver == "pyodbc": + engine.dialect.dbapi.pooling = False + + @create_db.for_db("mssql") def _mssql_create_db(cfg, eng, ident): with eng.connect().execution_options(isolation_level="AUTOCOMMIT") as conn: diff --git a/lib/sqlalchemy/testing/provision.py b/lib/sqlalchemy/testing/provision.py index 56c1be2518a..eea9c66dbca 100644 --- a/lib/sqlalchemy/testing/provision.py +++ b/lib/sqlalchemy/testing/provision.py @@ -314,7 +314,7 @@ def update_db_opts(db_url, db_opts): def post_configure_engine(url, engine, follower_ident): """Perform extra steps after configuring an engine for testing. 
- (For the internal dialects, currently only used by sqlite, oracle) + (For the internal dialects, currently only used by sqlite, oracle, mssql) """ pass From 3b5f0fcfbe5fe6bca5566d4a37b28cfe2bd89f8d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 12 Aug 2024 19:50:05 -0400 Subject: [PATCH 613/632] de-memoize _proxy_key when new annotations are added Fixed regression from 1.3 where the column key used for a hybrid property might be populated with that of the underlying column that it returns, for a property that returns an ORM mapped column directly, rather than the key used by the hybrid property itself. Fixes: #11728 Change-Id: Ifb298e46a20f90f6b6a717674f142a87cbceb468 (cherry picked from commit ffc7e8d73b30ea45fb03e0727b9fe96b6b8d4cfa) (cherry picked from commit 5b1758a9bb8952adca91a95483ec1d11a66ad1e2) --- doc/build/changelog/unreleased_14/11728.rst | 9 ++++ lib/sqlalchemy/sql/elements.py | 9 +++- test/ext/test_hybrid.py | 49 +++++++++++++++++++++ 3 files changed, 66 insertions(+), 1 deletion(-) create mode 100644 doc/build/changelog/unreleased_14/11728.rst diff --git a/doc/build/changelog/unreleased_14/11728.rst b/doc/build/changelog/unreleased_14/11728.rst new file mode 100644 index 00000000000..b27aa3333d7 --- /dev/null +++ b/doc/build/changelog/unreleased_14/11728.rst @@ -0,0 +1,9 @@ +.. change:: + :tags: bug, regression, orm + :tickets: 11728 + :versions: 2.0.33 + + Fixed regression from 1.3 where the column key used for a hybrid property + might be populated with that of the underlying column that it returns, for + a property that returns an ORM mapped column directly, rather than the key + used by the hybrid property itself. 
diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index 7671e75d487..96f2936fe78 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -5283,7 +5283,14 @@ def __init__(self, element, values): def _with_annotations(self, values): clone = super(AnnotatedColumnElement, self)._with_annotations(values) - clone.__dict__.pop("comparator", None) + for attr in ( + "comparator", + "_proxy_key", + "_tq_key_label", + "_tq_label", + "_non_anon_label", + ): + clone.__dict__.pop(attr, None) return clone @util.memoized_property diff --git a/test/ext/test_hybrid.py b/test/ext/test_hybrid.py index be42fdb6d0e..caee584a0e7 100644 --- a/test/ext/test_hybrid.py +++ b/test/ext/test_hybrid.py @@ -5,6 +5,7 @@ from sqlalchemy import func from sqlalchemy import inspect from sqlalchemy import Integer +from sqlalchemy import LABEL_STYLE_DISAMBIGUATE_ONLY from sqlalchemy import LABEL_STYLE_TABLENAME_PLUS_COL from sqlalchemy import literal_column from sqlalchemy import Numeric @@ -322,6 +323,21 @@ def name(self): return A + @testing.fixture + def _unnamed_expr_matches_col_fixture(self): + Base = declarative_base() + + class A(Base): + __tablename__ = "a" + id = Column(Integer, primary_key=True) + foo = Column(String) + + @hybrid.hybrid_property + def bar(self): + return self.foo + + return A + def test_labeling_for_unnamed(self, _unnamed_expr_fixture): A = _unnamed_expr_fixture @@ -341,6 +357,39 @@ def test_labeling_for_unnamed(self, _unnamed_expr_fixture): "a.lastname AS name FROM a) AS anon_1", ) + @testing.variation("pre_populate_col_proxy", [True, False]) + def test_labeling_for_unnamed_matches_col( + self, _unnamed_expr_matches_col_fixture, pre_populate_col_proxy + ): + """test #11728""" + + A = _unnamed_expr_matches_col_fixture + + if pre_populate_col_proxy: + pre_stmt = select(A.id, A.foo) + pre_stmt.subquery().c + + stmt = select(A.id, A.bar) + self.assert_compile( + stmt, + "SELECT a.id, a.foo FROM a", + ) + + compile_state = 
stmt._compile_state_factory(stmt, None) + eq_( + compile_state._column_naming_convention( + LABEL_STYLE_DISAMBIGUATE_ONLY, legacy=False + )(list(stmt.inner_columns)[1]), + "bar", + ) + eq_(stmt.subquery().c.keys(), ["id", "bar"]) + + self.assert_compile( + select(stmt.subquery()), + "SELECT anon_1.id, anon_1.foo FROM " + "(SELECT a.id AS id, a.foo AS foo FROM a) AS anon_1", + ) + def test_labeling_for_unnamed_tablename_plus_col( self, _unnamed_expr_fixture ): From 5a9882a56ead5152cacdebfee9b7789879b9ec05 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 2 Sep 2024 10:37:29 -0400 Subject: [PATCH 614/632] dont erase transaction if rollback/commit failed outside of asyncpg Fixed critical issue in the asyncpg driver where a rollback or commit that fails specifically for the ``MissingGreenlet`` condition or any other error that is not raised by asyncpg itself would discard the asyncpg transaction in any case, even though the transaction were still idle, leaving to a server side condition with an idle transaction that then goes back into the connection pool. The flags for "transaction closed" are now not reset for errors that are raised outside of asyncpg itself. When asyncpg itself raises an error for ``.commit()`` or ``.rollback()``, asyncpg does then discard of this transaction. 
Fixes: #11819 Change-Id: I12f0532788b03ea63fb47a7af21e07c37effb070 (cherry picked from commit a1f220cb4d1a04412a53200f454fbfc706e136b3) (cherry picked from commit ca69db7e1ff6dabbbd57b1bca3387d0321da19a5) --- doc/build/changelog/unreleased_14/11819.rst | 14 ++++++ lib/sqlalchemy/dialects/postgresql/asyncpg.py | 36 ++++++++++--- test/dialect/postgresql/test_async_pg_py3k.py | 50 +++++++++++++++++++ 3 files changed, 92 insertions(+), 8 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/11819.rst diff --git a/doc/build/changelog/unreleased_14/11819.rst b/doc/build/changelog/unreleased_14/11819.rst new file mode 100644 index 00000000000..6211eb487ee --- /dev/null +++ b/doc/build/changelog/unreleased_14/11819.rst @@ -0,0 +1,14 @@ +.. change:: + :tags: bug, postgresql + :tickets: 11819 + :versions: 2.0.33, 1.4.54 + + Fixed critical issue in the asyncpg driver where a rollback or commit that + fails specifically for the ``MissingGreenlet`` condition or any other error + that is not raised by asyncpg itself would discard the asyncpg transaction + in any case, even though the transaction were still idle, leaving to a + server side condition with an idle transaction that then goes back into the + connection pool. The flags for "transaction closed" are now not reset for + errors that are raised outside of asyncpg itself. When asyncpg itself + raises an error for ``.commit()`` or ``.rollback()``, asyncpg does then + discard of this transaction. 
diff --git a/lib/sqlalchemy/dialects/postgresql/asyncpg.py b/lib/sqlalchemy/dialects/postgresql/asyncpg.py index 84e2998a4a6..968dfbd3be5 100644 --- a/lib/sqlalchemy/dialects/postgresql/asyncpg.py +++ b/lib/sqlalchemy/dialects/postgresql/asyncpg.py @@ -723,25 +723,45 @@ def cursor(self, server_side=False): else: return AsyncAdapt_asyncpg_cursor(self) + async def _rollback_and_discard(self): + try: + await self._transaction.rollback() + finally: + # if asyncpg .rollback() was actually called, then whether or + # not it raised or succeeded, the transation is done, discard it + self._transaction = None + self._started = False + + async def _commit_and_discard(self): + try: + await self._transaction.commit() + finally: + # if asyncpg .commit() was actually called, then whether or + # not it raised or succeeded, the transation is done, discard it + self._transaction = None + self._started = False + def rollback(self): if self._started: try: - self.await_(self._transaction.rollback()) - except Exception as error: - self._handle_exception(error) - finally: + self.await_(self._rollback_and_discard()) self._transaction = None self._started = False + except Exception as error: + # don't dereference asyncpg transaction if we didn't + # actually try to call rollback() on it + self._handle_exception(error) def commit(self): if self._started: try: - self.await_(self._transaction.commit()) - except Exception as error: - self._handle_exception(error) - finally: + self.await_(self._commit_and_discard()) self._transaction = None self._started = False + except Exception as error: + # don't dereference asyncpg transaction if we didn't + # actually try to call commit() on it + self._handle_exception(error) def close(self): self.rollback() diff --git a/test/dialect/postgresql/test_async_pg_py3k.py b/test/dialect/postgresql/test_async_pg_py3k.py index 782cf33dd39..a7a8af1576c 100644 --- a/test/dialect/postgresql/test_async_pg_py3k.py +++ b/test/dialect/postgresql/test_async_pg_py3k.py @@ 
-12,6 +12,7 @@ from sqlalchemy.dialects.postgresql import ENUM from sqlalchemy.testing import async_test from sqlalchemy.testing import eq_ +from sqlalchemy.testing import expect_raises from sqlalchemy.testing import fixtures from sqlalchemy.testing import mock @@ -165,6 +166,55 @@ async def async_setup(engine, enums): ], ) + @testing.variation("trans", ["commit", "rollback"]) + @async_test + async def test_dont_reset_open_transaction( + self, trans, async_testing_engine + ): + """test for #11819""" + + engine = async_testing_engine() + + control_conn = await engine.connect() + await control_conn.execution_options(isolation_level="AUTOCOMMIT") + + conn = await engine.connect() + txid_current = ( + await conn.exec_driver_sql("select txid_current()") + ).scalar() + + with expect_raises(exc.MissingGreenlet): + if trans.commit: + conn.sync_connection.connection.dbapi_connection.commit() + elif trans.rollback: + conn.sync_connection.connection.dbapi_connection.rollback() + else: + trans.fail() + + trans_exists = ( + await control_conn.exec_driver_sql( + f"SELECT count(*) FROM pg_stat_activity " + f"where backend_xid={txid_current}" + ) + ).scalar() + eq_(trans_exists, 1) + + if trans.commit: + await conn.commit() + elif trans.rollback: + await conn.rollback() + else: + trans.fail() + + trans_exists = ( + await control_conn.exec_driver_sql( + f"SELECT count(*) FROM pg_stat_activity " + f"where backend_xid={txid_current}" + ) + ).scalar() + eq_(trans_exists, 0) + await engine.dispose() + @async_test async def test_failed_commit_recover(self, metadata, async_testing_engine): From 8dce4730223f87e3dcaaf9ac6ca4366ed9b4a2a8 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Wed, 4 Sep 2024 19:10:31 +0200 Subject: [PATCH 615/632] fix typo in `elect.slice` docs Change-Id: I859b48e320a04cedc6084d067cb20b89ac5d76bb (cherry picked from commit 6d0379f0565db1b6bf3aa7bead44d759407abadc) --- lib/sqlalchemy/sql/selectable.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index 7536665a6ea..cbec34d727e 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -3833,7 +3833,7 @@ def slice(self, start, stop): For example, :: - stmt = select(User).order_by(User).id.slice(1, 3) + stmt = select(User).order_by(User.id).slice(1, 3) renders as From 2b04a6c12a58b550aea2fe462674a812b0024c04 Mon Sep 17 00:00:00 2001 From: Jimmy AUDEBERT <109511155+jaudebert@users.noreply.github.com> Date: Wed, 4 Sep 2024 19:23:53 +0200 Subject: [PATCH 616/632] Include operators in postgres JSONB documentation (#11828) (cherry picked from commit 06ca61066ee312a5198cf1db869f388255212559) Change-Id: I3e432a8b14309314f4c56d43841cba464518b125 --- lib/sqlalchemy/dialects/postgresql/json.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/lib/sqlalchemy/dialects/postgresql/json.py b/lib/sqlalchemy/dialects/postgresql/json.py index e6b6f58677e..a72a6781d92 100644 --- a/lib/sqlalchemy/dialects/postgresql/json.py +++ b/lib/sqlalchemy/dialects/postgresql/json.py @@ -294,22 +294,27 @@ class Comparator(JSON.Comparator): """Define comparison operations for :class:`_types.JSON`.""" def has_key(self, other): - """Boolean expression. Test for presence of a key. Note that the - key may be a SQLA expression. + """Boolean expression. Test for presence of a key (equivalent of + the ``?`` operator). Note that the key may be a SQLA expression. """ return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean) def has_all(self, other): - """Boolean expression. Test for presence of all keys in jsonb""" + """Boolean expression. Test for presence of all keys in jsonb + (equivalent of the ``?&`` operator) + """ return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean) def has_any(self, other): - """Boolean expression. Test for presence of any key in jsonb""" + """Boolean expression. 
Test for presence of any key in jsonb + (equivalent of the ``?|`` operator) + """ return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean) def contains(self, other, **kwargs): """Boolean expression. Test if keys (or array) are a superset - of/contained the keys of the argument jsonb expression. + of/contained the keys of the argument jsonb expression + (equivalent of the ``@>`` operator). kwargs may be ignored by this operator but are required for API conformance. @@ -318,7 +323,8 @@ def contains(self, other, **kwargs): def contained_by(self, other): """Boolean expression. Test if keys are a proper subset of the - keys of the argument jsonb expression. + keys of the argument jsonb expression + (equivalent of the ``<@`` operator). """ return self.operate( CONTAINED_BY, other, result_type=sqltypes.Boolean From 1a24a15fe9111275bdac6ecca690c1cfb97eb075 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 5 Sep 2024 10:06:36 -0400 Subject: [PATCH 617/632] move py313 tests to greenlet main vstinner's branch merged and was immediately deleted from that repo. greenlet still not released. 
so keep on chasing it :/ Change-Id: I79927061566db75b4e26b3dbc39b817786531db6 (cherry picked from commit 88dd18cd89598d0569d761db206d4559e8cd57be) (cherry picked from commit 4a72df72cd2bb890034a9843b27a51e662ceccf7) --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index d8511d3bdad..3adbe51dbd3 100644 --- a/tox.ini +++ b/tox.ini @@ -47,7 +47,7 @@ deps= mock; python_version < '3.3' - py313: git+https://github.com/vstinner/greenlet@py313\#egg=greenlet + py313: git+https://github.com/python-greenlet/greenlet.git\#egg=greenlet dbapimain-sqlite: git+https://github.com/omnilib/aiosqlite.git\#egg=aiosqlite dbapimain-sqlite: git+https://github.com/coleifer/sqlcipher3.git\#egg=sqlcipher3 From e9d2e8f89729837c4c8bc6af7f3c583ccc737406 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 5 Sep 2024 09:44:32 -0400 Subject: [PATCH 618/632] remove setuptools test, backport issue #11818 to support our release tools we need to fully move 1.4 into modern setuptools territory in order to continue releasing. Fixes: #11818 Change-Id: Idb512a4990b002062f0c02ad22ee488c97c18ef4 --- doc/build/changelog/unreleased_14/11818.rst | 18 ++++++++++++++++++ .../unreleased_14/remove_testcommand.rst | 7 +++++++ pyproject.toml | 4 +--- setup.py | 19 ------------------- 4 files changed, 26 insertions(+), 22 deletions(-) create mode 100644 doc/build/changelog/unreleased_14/11818.rst create mode 100644 doc/build/changelog/unreleased_14/remove_testcommand.rst diff --git a/doc/build/changelog/unreleased_14/11818.rst b/doc/build/changelog/unreleased_14/11818.rst new file mode 100644 index 00000000000..c71d299ff1e --- /dev/null +++ b/doc/build/changelog/unreleased_14/11818.rst @@ -0,0 +1,18 @@ +.. change:: + :tags: change, general + :tickets: 11818 + :versions: 2.0.33 1.4.54 + + The pin for ``setuptools<69.3`` in ``pyproject.toml`` has been removed. 
+ This pin was to prevent a sudden change in setuptools to use :pep:`625` + from taking place, which would change the file name of SQLAlchemy's source + distribution on pypi to be an all lower case name, which is likely to cause + problems with various build environments that expected the previous naming + style. However, the presence of this pin is holding back environments that + otherwise want to use a newer setuptools, so we've decided to move forward + with this change, with the assumption that build environments will have + largely accommodated the setuptools change by now. + + This change was first released in version 2.0.33 however is being + backported to 1.4.54 to support ongoing releases. + diff --git a/doc/build/changelog/unreleased_14/remove_testcommand.rst b/doc/build/changelog/unreleased_14/remove_testcommand.rst new file mode 100644 index 00000000000..61c89d912c5 --- /dev/null +++ b/doc/build/changelog/unreleased_14/remove_testcommand.rst @@ -0,0 +1,7 @@ +.. change:: + :tags: change, general + + The setuptools "test" command is removed from the 1.4 series as modern + versions of setuptools actively refuse to accommodate this extension being + present. This change was already part of the 2.0 series. To run the + test suite use the ``tox`` command. 
diff --git a/pyproject.toml b/pyproject.toml index 891120ab7e0..6a93e957332 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,9 +1,7 @@ [build-system] build-backend = "setuptools.build_meta" requires = [ - # avoid moving to https://github.com/pypa/setuptools/issues/3593 - # until we're ready - "setuptools>=44,<69.3", + "setuptools>=44", ] [tool.black] diff --git a/setup.py b/setup.py index f1a1cacba36..243c9696704 100644 --- a/setup.py +++ b/setup.py @@ -7,7 +7,6 @@ from setuptools import Extension from setuptools import setup from setuptools.command.build_ext import build_ext -from setuptools.command.test import test as TestCommand # attempt to use pep-632 imports for setuptools symbols; however, # since these symbols were only added to setuptools as of 59.0.1, @@ -95,24 +94,6 @@ def has_ext_modules(self): return True -class UseTox(TestCommand): - RED = 31 - RESET_SEQ = "\033[0m" - BOLD_SEQ = "\033[1m" - COLOR_SEQ = "\033[1;%dm" - - def run_tests(self): - sys.stderr.write( - "%s%spython setup.py test is deprecated by pypa. 
Please invoke " - "'tox' with no arguments for a basic test run.\n%s" - % (self.COLOR_SEQ % self.RED, self.BOLD_SEQ, self.RESET_SEQ) - ) - sys.exit(1) - - -cmdclass["test"] = UseTox - - def status_msgs(*msgs): print("*" * 75) for msg in msgs: From dfe401fd471323638ad3e1b1027d89e01fcbeef5 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 5 Sep 2024 11:50:20 -0400 Subject: [PATCH 619/632] - 1.4.54 --- doc/build/changelog/changelog_14.rst | 54 ++++++++++++++++++- doc/build/changelog/unreleased_14/11728.rst | 9 ---- doc/build/changelog/unreleased_14/11818.rst | 18 ------- doc/build/changelog/unreleased_14/11819.rst | 14 ----- .../unreleased_14/remove_testcommand.rst | 7 --- doc/build/conf.py | 4 +- 6 files changed, 55 insertions(+), 51 deletions(-) delete mode 100644 doc/build/changelog/unreleased_14/11728.rst delete mode 100644 doc/build/changelog/unreleased_14/11818.rst delete mode 100644 doc/build/changelog/unreleased_14/11819.rst delete mode 100644 doc/build/changelog/unreleased_14/remove_testcommand.rst diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index c19ef965f88..27d7743f6e1 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -15,7 +15,59 @@ This document details individual issue-level changes made throughout .. changelog:: :version: 1.4.54 - :include_notes_from: unreleased_14 + :released: September 5, 2024 + + .. change:: + :tags: bug, regression, orm + :tickets: 11728 + :versions: 2.0.33 + + Fixed regression from 1.3 where the column key used for a hybrid property + might be populated with that of the underlying column that it returns, for + a property that returns an ORM mapped column directly, rather than the key + used by the hybrid property itself. + + .. change:: + :tags: change, general + :tickets: 11818 + :versions: 2.0.33 1.4.54 + + The pin for ``setuptools<69.3`` in ``pyproject.toml`` has been removed. 
+ This pin was to prevent a sudden change in setuptools to use :pep:`625` + from taking place, which would change the file name of SQLAlchemy's source + distribution on pypi to be an all lower case name, which is likely to cause + problems with various build environments that expected the previous naming + style. However, the presence of this pin is holding back environments that + otherwise want to use a newer setuptools, so we've decided to move forward + with this change, with the assumption that build environments will have + largely accommodated the setuptools change by now. + + This change was first released in version 2.0.33 however is being + backported to 1.4.54 to support ongoing releases. + + + .. change:: + :tags: bug, postgresql + :tickets: 11819 + :versions: 2.0.33, 1.4.54 + + Fixed critical issue in the asyncpg driver where a rollback or commit that + fails specifically for the ``MissingGreenlet`` condition or any other error + that is not raised by asyncpg itself would discard the asyncpg transaction + in any case, even though the transaction were still idle, leaving to a + server side condition with an idle transaction that then goes back into the + connection pool. The flags for "transaction closed" are now not reset for + errors that are raised outside of asyncpg itself. When asyncpg itself + raises an error for ``.commit()`` or ``.rollback()``, asyncpg does then + discard of this transaction. + + .. change:: + :tags: change, general + + The setuptools "test" command is removed from the 1.4 series as modern + versions of setuptools actively refuse to accommodate this extension being + present. This change was already part of the 2.0 series. To run the + test suite use the ``tox`` command. .. 
changelog:: :version: 1.4.53 diff --git a/doc/build/changelog/unreleased_14/11728.rst b/doc/build/changelog/unreleased_14/11728.rst deleted file mode 100644 index b27aa3333d7..00000000000 --- a/doc/build/changelog/unreleased_14/11728.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. change:: - :tags: bug, regression, orm - :tickets: 11728 - :versions: 2.0.33 - - Fixed regression from 1.3 where the column key used for a hybrid property - might be populated with that of the underlying column that it returns, for - a property that returns an ORM mapped column directly, rather than the key - used by the hybrid property itself. diff --git a/doc/build/changelog/unreleased_14/11818.rst b/doc/build/changelog/unreleased_14/11818.rst deleted file mode 100644 index c71d299ff1e..00000000000 --- a/doc/build/changelog/unreleased_14/11818.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. change:: - :tags: change, general - :tickets: 11818 - :versions: 2.0.33 1.4.54 - - The pin for ``setuptools<69.3`` in ``pyproject.toml`` has been removed. - This pin was to prevent a sudden change in setuptools to use :pep:`625` - from taking place, which would change the file name of SQLAlchemy's source - distribution on pypi to be an all lower case name, which is likely to cause - problems with various build environments that expected the previous naming - style. However, the presence of this pin is holding back environments that - otherwise want to use a newer setuptools, so we've decided to move forward - with this change, with the assumption that build environments will have - largely accommodated the setuptools change by now. - - This change was first released in version 2.0.33 however is being - backported to 1.4.54 to support ongoing releases. - diff --git a/doc/build/changelog/unreleased_14/11819.rst b/doc/build/changelog/unreleased_14/11819.rst deleted file mode 100644 index 6211eb487ee..00000000000 --- a/doc/build/changelog/unreleased_14/11819.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. 
change:: - :tags: bug, postgresql - :tickets: 11819 - :versions: 2.0.33, 1.4.54 - - Fixed critical issue in the asyncpg driver where a rollback or commit that - fails specifically for the ``MissingGreenlet`` condition or any other error - that is not raised by asyncpg itself would discard the asyncpg transaction - in any case, even though the transaction were still idle, leaving to a - server side condition with an idle transaction that then goes back into the - connection pool. The flags for "transaction closed" are now not reset for - errors that are raised outside of asyncpg itself. When asyncpg itself - raises an error for ``.commit()`` or ``.rollback()``, asyncpg does then - discard of this transaction. diff --git a/doc/build/changelog/unreleased_14/remove_testcommand.rst b/doc/build/changelog/unreleased_14/remove_testcommand.rst deleted file mode 100644 index 61c89d912c5..00000000000 --- a/doc/build/changelog/unreleased_14/remove_testcommand.rst +++ /dev/null @@ -1,7 +0,0 @@ -.. change:: - :tags: change, general - - The setuptools "test" command is removed from the 1.4 series as modern - versions of setuptools actively refuse to accommodate this extension being - present. This change was already part of the 2.0 series. To run the - test suite use the ``tox`` command. diff --git a/doc/build/conf.py b/doc/build/conf.py index 4fe12c6a025..c3aa5521a77 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -221,9 +221,9 @@ # The short X.Y version. version = "1.4" # The full version, including alpha/beta/rc tags. 
-release = "1.4.53" +release = "1.4.54" -release_date = "July 29, 2024" +release_date = "September 5, 2024" site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org") site_adapter_template = "docs_adapter.mako" From b3b2bd2f47b1219344e8222b885f6bd1ca89d269 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 5 Sep 2024 11:54:33 -0400 Subject: [PATCH 620/632] Version 1.4.55 placeholder --- doc/build/changelog/changelog_14.rst | 4 ++++ lib/sqlalchemy/__init__.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/build/changelog/changelog_14.rst b/doc/build/changelog/changelog_14.rst index 27d7743f6e1..0b4de1a55f3 100644 --- a/doc/build/changelog/changelog_14.rst +++ b/doc/build/changelog/changelog_14.rst @@ -13,6 +13,10 @@ This document details individual issue-level changes made throughout :start-line: 5 +.. changelog:: + :version: 1.4.55 + :include_notes_from: unreleased_14 + .. changelog:: :version: 1.4.54 :released: September 5, 2024 diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index ef3ad61f3cc..695b25dd8ca 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -131,7 +131,7 @@ from .types import VARCHAR -__version__ = "1.4.54" +__version__ = "1.4.55" def __go(lcls): From 648b7b99c1f57a0cbf5c2112a376d151360a1a8a Mon Sep 17 00:00:00 2001 From: Yunus Koning Date: Tue, 17 Sep 2024 20:05:09 +0200 Subject: [PATCH 621/632] update MonetDB dialect information (#11884) (cherry picked from commit 8da20140fe2d57584211d85de589cbce7172a2da) --- doc/build/dialects/index.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/build/dialects/index.rst b/doc/build/dialects/index.rst index 47d6f50de84..6f297124776 100644 --- a/doc/build/dialects/index.rst +++ b/doc/build/dialects/index.rst @@ -119,7 +119,7 @@ Currently maintained external dialect projects for SQLAlchemy include: +------------------------------------------------+---------------------------------------+ | 
Microsoft SQL Server (via turbodbc) | sqlalchemy-turbodbc_ | +------------------------------------------------+---------------------------------------+ -| MonetDB [1]_ | sqlalchemy-monetdb_ | +| MonetDB | sqlalchemy-monetdb_ | +------------------------------------------------+---------------------------------------+ | OpenGauss | openGauss-sqlalchemy_ | +------------------------------------------------+---------------------------------------+ @@ -149,7 +149,7 @@ Currently maintained external dialect projects for SQLAlchemy include: .. _sqlalchemy-solr: https://github.com/aadel/sqlalchemy-solr .. _sqlalchemy_exasol: https://github.com/blue-yonder/sqlalchemy_exasol .. _sqlalchemy-sqlany: https://github.com/sqlanywhere/sqlalchemy-sqlany -.. _sqlalchemy-monetdb: https://github.com/gijzelaerr/sqlalchemy-monetdb +.. _sqlalchemy-monetdb: https://github.com/MonetDB/sqlalchemy-monetdb .. _snowflake-sqlalchemy: https://github.com/snowflakedb/snowflake-sqlalchemy .. _sqlalchemy-tds: https://github.com/m32/sqlalchemy-tds .. 
_sqlalchemy-cratedb: https://github.com/crate/sqlalchemy-cratedb From f620e2aa3db6d520b11471fbf5df8d7740e0e7e7 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Tue, 17 Sep 2024 20:22:11 +0200 Subject: [PATCH 622/632] Remove test warning in python 3.13 Change-Id: Ib098754ef6d157e8dd1eac32b3cb114a9ca66e4a (cherry picked from commit 0cca754f2101cf0e63f0c67b0220e7a4eb3a0f9c) --- lib/sqlalchemy/util/__init__.py | 1 + lib/sqlalchemy/util/compat.py | 1 + test/ext/test_extendedattr.py | 3 ++- 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/sqlalchemy/util/__init__.py b/lib/sqlalchemy/util/__init__.py index 544f4c06f63..289e191cf83 100644 --- a/lib/sqlalchemy/util/__init__.py +++ b/lib/sqlalchemy/util/__init__.py @@ -81,6 +81,7 @@ from .compat import py310 from .compat import py311 from .compat import py312 +from .compat import py313 from .compat import py37 from .compat import py38 from .compat import py39 diff --git a/lib/sqlalchemy/util/compat.py b/lib/sqlalchemy/util/compat.py index 81e8dbf475a..d70cc82ffd7 100644 --- a/lib/sqlalchemy/util/compat.py +++ b/lib/sqlalchemy/util/compat.py @@ -14,6 +14,7 @@ import platform import sys +py313 = sys.version_info >= (3, 13) py312 = sys.version_info >= (3, 12) py311 = sys.version_info >= (3, 11) py310 = sys.version_info >= (3, 10) diff --git a/test/ext/test_extendedattr.py b/test/ext/test_extendedattr.py index c762754bc58..d895f74a9ee 100644 --- a/test/ext/test_extendedattr.py +++ b/test/ext/test_extendedattr.py @@ -156,7 +156,8 @@ def __sa_instrumentation_manager__(cls): ) # This proves SA can handle a class with non-string dict keys - if util.cpython: + # Since python 3.13 non-string key raise a runtime warning. + if util.cpython and not util.py313: locals()[42] = 99 # Don't remove this line! 
def __init__(self, **kwargs): From ef8da0e6038c8f1a88cefde4cb02bb745cb6ecba Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Tue, 8 Oct 2024 23:22:20 +0200 Subject: [PATCH 623/632] fix typo in mapper doc string Change-Id: I10fd7bdb0f0564a5beadfe3fa9fbb7e5ea88362c (cherry picked from commit 74a8e2ced922183d6ad072eced904cb989113fa2) --- lib/sqlalchemy/orm/mapper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index ba668b8aedc..4da1a5673a4 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ -321,7 +321,7 @@ class User(Base): mapping of the class to an alternate selectable, for loading only. - .. seealso:: + .. seealso:: :ref:`relationship_aliased_class` - the new pattern that removes the need for the :paramref:`_orm.Mapper.non_primary` flag. From f42a4995caada93f8f64560982ec2184a397894a Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Thu, 10 Oct 2024 22:25:39 +0200 Subject: [PATCH 624/632] remove fully tested in ci reference since it's confusing Change-Id: I5d1c14b2c2b3bcbb55861e1c4a90ffafe8ee00fa (cherry picked from commit 8684c8dda6cde2f470ad16827b09eb6d4bb1c6d8) --- doc/build/dialects/index.rst | 25 +++++++++++----------- lib/sqlalchemy/dialects/mssql/base.py | 1 - lib/sqlalchemy/dialects/mysql/base.py | 1 - lib/sqlalchemy/dialects/oracle/base.py | 1 - lib/sqlalchemy/dialects/postgresql/base.py | 1 - lib/sqlalchemy/dialects/sqlite/base.py | 1 - 6 files changed, 12 insertions(+), 18 deletions(-) diff --git a/doc/build/dialects/index.rst b/doc/build/dialects/index.rst index 6f297124776..dd8be484efe 100644 --- a/doc/build/dialects/index.rst +++ b/doc/build/dialects/index.rst @@ -22,8 +22,8 @@ Included Dialects oracle mssql -Support Levels for Included Dialects -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Supported versions for Included Dialects +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ The following table summarizes the support level for each included 
dialect. @@ -33,21 +33,20 @@ The following table summarizes the support level for each included dialect. Support Definitions ^^^^^^^^^^^^^^^^^^^ -.. glossary:: + .. Fully tested in CI + .. **Fully tested in CI** indicates a version that is tested in the sqlalchemy + .. CI system and passes all the tests in the test suite. - Fully tested in CI - **Fully tested in CI** indicates a version that is tested in the sqlalchemy - CI system and passes all the tests in the test suite. +.. glossary:: - Normal support - **Normal support** indicates that most features should work, - but not all versions are tested in the ci configuration so there may - be some not supported edge cases. We will try to fix issues that affect - these versions. + Supported version + **Supported version** indicates that most SQLAlchemy features should work + for the mentioned database version. Since not all database versions may be + tested in the ci there may be some not working edge cases. Best effort - **Best effort** indicates that we try to support basic features on them, - but most likely there will be unsupported features or errors in some use cases. + **Best effort** indicates that SQLAlchemy tries to support basic features on these + versions, but most likely there will be unsupported features or errors in some use cases. Pull requests with associated issues may be accepted to continue supporting older versions, which are reviewed on a case-by-case basis. diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index efaec75c540..5bdbc2ecf31 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -7,7 +7,6 @@ """ .. 
dialect:: mssql :name: Microsoft SQL Server - :full_support: 2017 :normal_support: 2012+ :best_effort: 2005+ diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index 8684c69d298..f9fc6cde47e 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -9,7 +9,6 @@ .. dialect:: mysql :name: MySQL / MariaDB - :full_support: 5.6, 5.7, 8.0 / 10.8, 10.9 :normal_support: 5.6+ / 10+ :best_effort: 5.0.2+ / 5.0.2+ diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py index 2f64b9dfdae..e57a9770c22 100644 --- a/lib/sqlalchemy/dialects/oracle/base.py +++ b/lib/sqlalchemy/dialects/oracle/base.py @@ -8,7 +8,6 @@ r""" .. dialect:: oracle :name: Oracle - :full_support: 18c :normal_support: 11+ :best_effort: 8+ diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index 652a6956704..c63eb27a835 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -8,7 +8,6 @@ r""" .. dialect:: postgresql :name: PostgreSQL - :full_support: 12, 13, 14, 15 :normal_support: 9.6+ :best_effort: 8+ diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index c171136ac2b..0c8aa903117 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -8,7 +8,6 @@ r""" .. 
dialect:: sqlite :name: SQLite - :full_support: 3.36.0 :normal_support: 3.12+ :best_effort: 3.7.16+ From ecad1420ba5404337cb17d2c46c17040c35d46d7 Mon Sep 17 00:00:00 2001 From: Gord Thompson Date: Wed, 30 Oct 2024 11:30:40 -0600 Subject: [PATCH 625/632] Update bigquery dialect link (#12048) (cherry picked from commit 58822b9e2412dfefdced95164943fdb515e2f52c) --- doc/build/dialects/index.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/build/dialects/index.rst b/doc/build/dialects/index.rst index dd8be484efe..ebcb72eaaad 100644 --- a/doc/build/dialects/index.rst +++ b/doc/build/dialects/index.rst @@ -104,7 +104,7 @@ Currently maintained external dialect projects for SQLAlchemy include: +------------------------------------------------+---------------------------------------+ | Firebolt | firebolt-sqlalchemy_ | +------------------------------------------------+---------------------------------------+ -| Google BigQuery | pybigquery_ | +| Google BigQuery | sqlalchemy-bigquery_ | +------------------------------------------------+---------------------------------------+ | Google Sheets | gsheets_ | +------------------------------------------------+---------------------------------------+ @@ -141,7 +141,7 @@ Currently maintained external dialect projects for SQLAlchemy include: .. _ibm-db-sa: https://pypi.org/project/ibm-db-sa/ .. _PyHive: https://github.com/dropbox/PyHive#sqlalchemy .. _teradatasqlalchemy: https://pypi.org/project/teradatasqlalchemy/ -.. _pybigquery: https://github.com/mxmzdlv/pybigquery/ +.. _sqlalchemy-bigquery: https://pypi.org/project/sqlalchemy-bigquery/ .. _sqlalchemy-redshift: https://pypi.org/project/sqlalchemy-redshift .. _sqlalchemy-drill: https://github.com/JohnOmernik/sqlalchemy-drill .. 
_sqlalchemy-hana: https://github.com/SAP/sqlalchemy-hana From ae359f85742ce0cee6f6cf735fcb9010f6d0424d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 2 Jan 2025 16:41:57 -0500 Subject: [PATCH 626/632] 2025 Change-Id: Ifb33b8df2f838851f329415fa70f494acb4ccde5 --- LICENSE | 2 +- doc/build/conf.py | 2 +- doc/build/copyright.rst | 2 +- lib/sqlalchemy/__init__.py | 2 +- lib/sqlalchemy/cextension/immutabledict.c | 2 +- lib/sqlalchemy/cextension/processors.c | 2 +- lib/sqlalchemy/cextension/resultproxy.c | 2 +- lib/sqlalchemy/connectors/__init__.py | 2 +- lib/sqlalchemy/connectors/mxodbc.py | 2 +- lib/sqlalchemy/connectors/pyodbc.py | 2 +- lib/sqlalchemy/databases/__init__.py | 2 +- lib/sqlalchemy/dialects/__init__.py | 2 +- lib/sqlalchemy/dialects/firebird/__init__.py | 2 +- lib/sqlalchemy/dialects/firebird/base.py | 2 +- lib/sqlalchemy/dialects/firebird/fdb.py | 2 +- lib/sqlalchemy/dialects/firebird/kinterbasdb.py | 2 +- lib/sqlalchemy/dialects/mssql/__init__.py | 2 +- lib/sqlalchemy/dialects/mssql/base.py | 2 +- lib/sqlalchemy/dialects/mssql/information_schema.py | 2 +- lib/sqlalchemy/dialects/mssql/json.py | 2 +- lib/sqlalchemy/dialects/mssql/mxodbc.py | 2 +- lib/sqlalchemy/dialects/mssql/provision.py | 2 +- lib/sqlalchemy/dialects/mssql/pymssql.py | 2 +- lib/sqlalchemy/dialects/mssql/pyodbc.py | 2 +- lib/sqlalchemy/dialects/mysql/__init__.py | 2 +- lib/sqlalchemy/dialects/mysql/aiomysql.py | 2 +- lib/sqlalchemy/dialects/mysql/asyncmy.py | 2 +- lib/sqlalchemy/dialects/mysql/base.py | 2 +- lib/sqlalchemy/dialects/mysql/cymysql.py | 2 +- lib/sqlalchemy/dialects/mysql/dml.py | 2 +- lib/sqlalchemy/dialects/mysql/enumerated.py | 2 +- lib/sqlalchemy/dialects/mysql/expression.py | 2 +- lib/sqlalchemy/dialects/mysql/json.py | 2 +- lib/sqlalchemy/dialects/mysql/mariadb.py | 2 +- lib/sqlalchemy/dialects/mysql/mariadbconnector.py | 2 +- lib/sqlalchemy/dialects/mysql/mysqlconnector.py | 2 +- lib/sqlalchemy/dialects/mysql/mysqldb.py | 2 +- 
lib/sqlalchemy/dialects/mysql/oursql.py | 2 +- lib/sqlalchemy/dialects/mysql/provision.py | 2 +- lib/sqlalchemy/dialects/mysql/pymysql.py | 2 +- lib/sqlalchemy/dialects/mysql/pyodbc.py | 2 +- lib/sqlalchemy/dialects/mysql/reflection.py | 2 +- lib/sqlalchemy/dialects/mysql/reserved_words.py | 2 +- lib/sqlalchemy/dialects/mysql/types.py | 2 +- lib/sqlalchemy/dialects/oracle/__init__.py | 2 +- lib/sqlalchemy/dialects/oracle/base.py | 2 +- lib/sqlalchemy/dialects/oracle/cx_oracle.py | 2 +- lib/sqlalchemy/dialects/oracle/provision.py | 2 +- lib/sqlalchemy/dialects/postgresql/__init__.py | 2 +- lib/sqlalchemy/dialects/postgresql/array.py | 2 +- lib/sqlalchemy/dialects/postgresql/asyncpg.py | 2 +- lib/sqlalchemy/dialects/postgresql/base.py | 2 +- lib/sqlalchemy/dialects/postgresql/dml.py | 2 +- lib/sqlalchemy/dialects/postgresql/ext.py | 2 +- lib/sqlalchemy/dialects/postgresql/hstore.py | 2 +- lib/sqlalchemy/dialects/postgresql/json.py | 2 +- lib/sqlalchemy/dialects/postgresql/pg8000.py | 2 +- lib/sqlalchemy/dialects/postgresql/provision.py | 2 +- lib/sqlalchemy/dialects/postgresql/psycopg2.py | 2 +- lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py | 2 +- lib/sqlalchemy/dialects/postgresql/pygresql.py | 2 +- lib/sqlalchemy/dialects/postgresql/pypostgresql.py | 2 +- lib/sqlalchemy/dialects/postgresql/ranges.py | 2 +- lib/sqlalchemy/dialects/sqlite/__init__.py | 2 +- lib/sqlalchemy/dialects/sqlite/aiosqlite.py | 2 +- lib/sqlalchemy/dialects/sqlite/base.py | 2 +- lib/sqlalchemy/dialects/sqlite/dml.py | 2 +- lib/sqlalchemy/dialects/sqlite/json.py | 2 +- lib/sqlalchemy/dialects/sqlite/provision.py | 2 +- lib/sqlalchemy/dialects/sqlite/pysqlcipher.py | 2 +- lib/sqlalchemy/dialects/sqlite/pysqlite.py | 2 +- lib/sqlalchemy/dialects/sybase/__init__.py | 2 +- lib/sqlalchemy/dialects/sybase/base.py | 2 +- lib/sqlalchemy/dialects/sybase/mxodbc.py | 2 +- lib/sqlalchemy/dialects/sybase/pyodbc.py | 2 +- lib/sqlalchemy/dialects/sybase/pysybase.py | 2 +- 
lib/sqlalchemy/engine/__init__.py | 2 +- lib/sqlalchemy/engine/base.py | 2 +- lib/sqlalchemy/engine/characteristics.py | 2 +- lib/sqlalchemy/engine/create.py | 2 +- lib/sqlalchemy/engine/cursor.py | 2 +- lib/sqlalchemy/engine/default.py | 2 +- lib/sqlalchemy/engine/events.py | 2 +- lib/sqlalchemy/engine/interfaces.py | 2 +- lib/sqlalchemy/engine/mock.py | 2 +- lib/sqlalchemy/engine/reflection.py | 2 +- lib/sqlalchemy/engine/result.py | 2 +- lib/sqlalchemy/engine/row.py | 2 +- lib/sqlalchemy/engine/strategies.py | 2 +- lib/sqlalchemy/engine/url.py | 2 +- lib/sqlalchemy/engine/util.py | 2 +- lib/sqlalchemy/event/__init__.py | 2 +- lib/sqlalchemy/event/api.py | 2 +- lib/sqlalchemy/event/attr.py | 2 +- lib/sqlalchemy/event/base.py | 2 +- lib/sqlalchemy/event/legacy.py | 2 +- lib/sqlalchemy/event/registry.py | 2 +- lib/sqlalchemy/events.py | 2 +- lib/sqlalchemy/exc.py | 2 +- lib/sqlalchemy/ext/__init__.py | 2 +- lib/sqlalchemy/ext/associationproxy.py | 2 +- lib/sqlalchemy/ext/asyncio/__init__.py | 2 +- lib/sqlalchemy/ext/asyncio/base.py | 2 +- lib/sqlalchemy/ext/asyncio/engine.py | 2 +- lib/sqlalchemy/ext/asyncio/events.py | 2 +- lib/sqlalchemy/ext/asyncio/exc.py | 2 +- lib/sqlalchemy/ext/asyncio/result.py | 2 +- lib/sqlalchemy/ext/asyncio/scoping.py | 2 +- lib/sqlalchemy/ext/asyncio/session.py | 2 +- lib/sqlalchemy/ext/automap.py | 2 +- lib/sqlalchemy/ext/baked.py | 2 +- lib/sqlalchemy/ext/compiler.py | 2 +- lib/sqlalchemy/ext/declarative/__init__.py | 2 +- lib/sqlalchemy/ext/declarative/extensions.py | 2 +- lib/sqlalchemy/ext/horizontal_shard.py | 2 +- lib/sqlalchemy/ext/hybrid.py | 2 +- lib/sqlalchemy/ext/indexable.py | 2 +- lib/sqlalchemy/ext/instrumentation.py | 2 +- lib/sqlalchemy/ext/mutable.py | 2 +- lib/sqlalchemy/ext/mypy/__init__.py | 2 +- lib/sqlalchemy/ext/mypy/apply.py | 2 +- lib/sqlalchemy/ext/mypy/decl_class.py | 2 +- lib/sqlalchemy/ext/mypy/infer.py | 2 +- lib/sqlalchemy/ext/mypy/names.py | 2 +- lib/sqlalchemy/ext/mypy/plugin.py | 2 +- 
lib/sqlalchemy/ext/mypy/util.py | 2 +- lib/sqlalchemy/ext/orderinglist.py | 2 +- lib/sqlalchemy/ext/serializer.py | 2 +- lib/sqlalchemy/future/__init__.py | 2 +- lib/sqlalchemy/future/engine.py | 2 +- lib/sqlalchemy/future/orm/__init__.py | 2 +- lib/sqlalchemy/inspection.py | 2 +- lib/sqlalchemy/log.py | 2 +- lib/sqlalchemy/orm/__init__.py | 2 +- lib/sqlalchemy/orm/attributes.py | 2 +- lib/sqlalchemy/orm/base.py | 2 +- lib/sqlalchemy/orm/clsregistry.py | 2 +- lib/sqlalchemy/orm/collections.py | 2 +- lib/sqlalchemy/orm/context.py | 2 +- lib/sqlalchemy/orm/decl_api.py | 2 +- lib/sqlalchemy/orm/decl_base.py | 2 +- lib/sqlalchemy/orm/dependency.py | 2 +- lib/sqlalchemy/orm/descriptor_props.py | 2 +- lib/sqlalchemy/orm/dynamic.py | 2 +- lib/sqlalchemy/orm/evaluator.py | 2 +- lib/sqlalchemy/orm/events.py | 2 +- lib/sqlalchemy/orm/exc.py | 2 +- lib/sqlalchemy/orm/identity.py | 2 +- lib/sqlalchemy/orm/instrumentation.py | 2 +- lib/sqlalchemy/orm/interfaces.py | 2 +- lib/sqlalchemy/orm/loading.py | 2 +- lib/sqlalchemy/orm/mapper.py | 2 +- lib/sqlalchemy/orm/path_registry.py | 2 +- lib/sqlalchemy/orm/persistence.py | 2 +- lib/sqlalchemy/orm/properties.py | 2 +- lib/sqlalchemy/orm/query.py | 2 +- lib/sqlalchemy/orm/relationships.py | 2 +- lib/sqlalchemy/orm/scoping.py | 2 +- lib/sqlalchemy/orm/session.py | 2 +- lib/sqlalchemy/orm/state.py | 2 +- lib/sqlalchemy/orm/strategies.py | 2 +- lib/sqlalchemy/orm/strategy_options.py | 2 +- lib/sqlalchemy/orm/sync.py | 2 +- lib/sqlalchemy/orm/unitofwork.py | 2 +- lib/sqlalchemy/orm/util.py | 2 +- lib/sqlalchemy/pool/__init__.py | 2 +- lib/sqlalchemy/pool/base.py | 2 +- lib/sqlalchemy/pool/dbapi_proxy.py | 2 +- lib/sqlalchemy/pool/events.py | 2 +- lib/sqlalchemy/pool/impl.py | 2 +- lib/sqlalchemy/processors.py | 2 +- lib/sqlalchemy/schema.py | 2 +- lib/sqlalchemy/sql/__init__.py | 2 +- lib/sqlalchemy/sql/annotation.py | 2 +- lib/sqlalchemy/sql/base.py | 2 +- lib/sqlalchemy/sql/coercions.py | 2 +- lib/sqlalchemy/sql/compiler.py | 2 +- 
lib/sqlalchemy/sql/crud.py | 2 +- lib/sqlalchemy/sql/ddl.py | 2 +- lib/sqlalchemy/sql/default_comparator.py | 2 +- lib/sqlalchemy/sql/dml.py | 2 +- lib/sqlalchemy/sql/elements.py | 2 +- lib/sqlalchemy/sql/events.py | 2 +- lib/sqlalchemy/sql/expression.py | 2 +- lib/sqlalchemy/sql/functions.py | 2 +- lib/sqlalchemy/sql/lambdas.py | 2 +- lib/sqlalchemy/sql/naming.py | 2 +- lib/sqlalchemy/sql/operators.py | 2 +- lib/sqlalchemy/sql/roles.py | 2 +- lib/sqlalchemy/sql/schema.py | 2 +- lib/sqlalchemy/sql/selectable.py | 2 +- lib/sqlalchemy/sql/sqltypes.py | 2 +- lib/sqlalchemy/sql/traversals.py | 2 +- lib/sqlalchemy/sql/type_api.py | 2 +- lib/sqlalchemy/sql/util.py | 2 +- lib/sqlalchemy/sql/visitors.py | 2 +- lib/sqlalchemy/testing/__init__.py | 2 +- lib/sqlalchemy/testing/assertions.py | 2 +- lib/sqlalchemy/testing/assertsql.py | 2 +- lib/sqlalchemy/testing/asyncio.py | 2 +- lib/sqlalchemy/testing/config.py | 2 +- lib/sqlalchemy/testing/engines.py | 2 +- lib/sqlalchemy/testing/entities.py | 2 +- lib/sqlalchemy/testing/exclusions.py | 2 +- lib/sqlalchemy/testing/fixtures.py | 2 +- lib/sqlalchemy/testing/mock.py | 2 +- lib/sqlalchemy/testing/pickleable.py | 2 +- lib/sqlalchemy/testing/plugin/__init__.py | 2 +- lib/sqlalchemy/testing/plugin/bootstrap.py | 2 +- lib/sqlalchemy/testing/plugin/plugin_base.py | 2 +- lib/sqlalchemy/testing/plugin/pytestplugin.py | 2 +- lib/sqlalchemy/testing/plugin/reinvent_fixtures_py2k.py | 2 +- lib/sqlalchemy/testing/profiling.py | 2 +- lib/sqlalchemy/testing/provision.py | 2 +- lib/sqlalchemy/testing/requirements.py | 2 +- lib/sqlalchemy/testing/schema.py | 2 +- lib/sqlalchemy/testing/suite/__init__.py | 2 +- lib/sqlalchemy/testing/suite/test_cte.py | 2 +- lib/sqlalchemy/testing/suite/test_ddl.py | 2 +- lib/sqlalchemy/testing/suite/test_deprecations.py | 2 +- lib/sqlalchemy/testing/suite/test_dialect.py | 2 +- lib/sqlalchemy/testing/suite/test_insert.py | 2 +- lib/sqlalchemy/testing/suite/test_reflection.py | 2 +- 
lib/sqlalchemy/testing/suite/test_results.py | 2 +- lib/sqlalchemy/testing/suite/test_rowcount.py | 2 +- lib/sqlalchemy/testing/suite/test_select.py | 2 +- lib/sqlalchemy/testing/suite/test_sequence.py | 2 +- lib/sqlalchemy/testing/suite/test_types.py | 2 +- lib/sqlalchemy/testing/suite/test_unicode_ddl.py | 2 +- lib/sqlalchemy/testing/suite/test_update_delete.py | 2 +- lib/sqlalchemy/testing/util.py | 2 +- lib/sqlalchemy/testing/warnings.py | 2 +- lib/sqlalchemy/types.py | 2 +- lib/sqlalchemy/util/__init__.py | 2 +- lib/sqlalchemy/util/_collections.py | 2 +- lib/sqlalchemy/util/_compat_py3k.py | 2 +- lib/sqlalchemy/util/_concurrency_py3k.py | 2 +- lib/sqlalchemy/util/_preloaded.py | 2 +- lib/sqlalchemy/util/compat.py | 2 +- lib/sqlalchemy/util/concurrency.py | 2 +- lib/sqlalchemy/util/deprecations.py | 2 +- lib/sqlalchemy/util/langhelpers.py | 2 +- lib/sqlalchemy/util/queue.py | 2 +- lib/sqlalchemy/util/tool_support.py | 2 +- lib/sqlalchemy/util/topological.py | 2 +- setup.cfg | 1 + 246 files changed, 246 insertions(+), 245 deletions(-) diff --git a/LICENSE b/LICENSE index 967cdc5dc10..dfe1a4d815b 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright 2005-2024 SQLAlchemy authors and contributors . +Copyright 2005-2025 SQLAlchemy authors and contributors . Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/doc/build/conf.py b/doc/build/conf.py index c3aa5521a77..115b3c805b3 100644 --- a/doc/build/conf.py +++ b/doc/build/conf.py @@ -212,7 +212,7 @@ # General information about the project. 
project = u"SQLAlchemy" -copyright = u"2007-2024, the SQLAlchemy authors and contributors" # noqa +copyright = u"2007-2025, the SQLAlchemy authors and contributors" # noqa # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the diff --git a/doc/build/copyright.rst b/doc/build/copyright.rst index b3a67ccf469..54535474c42 100644 --- a/doc/build/copyright.rst +++ b/doc/build/copyright.rst @@ -6,7 +6,7 @@ Appendix: Copyright This is the MIT license: ``_ -Copyright (c) 2005-2024 Michael Bayer and contributors. +Copyright (c) 2005-2025 Michael Bayer and contributors. SQLAlchemy is a trademark of Michael Bayer. Permission is hereby granted, free of charge, to any person obtaining a copy of this diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index 695b25dd8ca..b8ba94208ae 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -1,5 +1,5 @@ # __init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/cextension/immutabledict.c b/lib/sqlalchemy/cextension/immutabledict.c index 2d6bd962257..2bd9a1e4abc 100644 --- a/lib/sqlalchemy/cextension/immutabledict.c +++ b/lib/sqlalchemy/cextension/immutabledict.c @@ -1,6 +1,6 @@ /* immuatbledict.c -Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +Copyright (C) 2005-2025 the SQLAlchemy authors and contributors This module is part of SQLAlchemy and is released under the MIT License: https://www.opensource.org/licenses/mit-license.php diff --git a/lib/sqlalchemy/cextension/processors.c b/lib/sqlalchemy/cextension/processors.c index 12ed79e96ac..e5bef9ad824 100644 --- a/lib/sqlalchemy/cextension/processors.c +++ b/lib/sqlalchemy/cextension/processors.c @@ -1,6 +1,6 @@ /* processors.c -Copyright (C) 2010-2024 the 
SQLAlchemy authors and contributors +Copyright (C) 2010-2025 the SQLAlchemy authors and contributors Copyright (C) 2010-2011 Gaetan de Menten gdementen@gmail.com This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/cextension/resultproxy.c b/lib/sqlalchemy/cextension/resultproxy.c index a88af0ede96..88f0ecb5562 100644 --- a/lib/sqlalchemy/cextension/resultproxy.c +++ b/lib/sqlalchemy/cextension/resultproxy.c @@ -1,6 +1,6 @@ /* resultproxy.c -Copyright (C) 2010-2024 the SQLAlchemy authors and contributors +Copyright (C) 2010-2025 the SQLAlchemy authors and contributors Copyright (C) 2010-2011 Gaetan de Menten gdementen@gmail.com This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/connectors/__init__.py b/lib/sqlalchemy/connectors/__init__.py index 518d2345c31..f293a4f181e 100644 --- a/lib/sqlalchemy/connectors/__init__.py +++ b/lib/sqlalchemy/connectors/__init__.py @@ -1,5 +1,5 @@ # connectors/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/connectors/mxodbc.py b/lib/sqlalchemy/connectors/mxodbc.py index df119229e6e..bfdabf5ed1f 100644 --- a/lib/sqlalchemy/connectors/mxodbc.py +++ b/lib/sqlalchemy/connectors/mxodbc.py @@ -1,5 +1,5 @@ # connectors/mxodbc.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/connectors/pyodbc.py b/lib/sqlalchemy/connectors/pyodbc.py index d0c27231ac8..8ec998bf6f6 100644 --- a/lib/sqlalchemy/connectors/pyodbc.py +++ b/lib/sqlalchemy/connectors/pyodbc.py @@ -1,5 +1,5 @@ # connectors/pyodbc.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and 
contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/databases/__init__.py b/lib/sqlalchemy/databases/__init__.py index 09d7ef2436a..8aa089d505d 100644 --- a/lib/sqlalchemy/databases/__init__.py +++ b/lib/sqlalchemy/databases/__init__.py @@ -1,5 +1,5 @@ # databases/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/__init__.py b/lib/sqlalchemy/dialects/__init__.py index 78bf4d14b2f..2fff37c65a9 100644 --- a/lib/sqlalchemy/dialects/__init__.py +++ b/lib/sqlalchemy/dialects/__init__.py @@ -1,5 +1,5 @@ # dialects/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/firebird/__init__.py b/lib/sqlalchemy/dialects/firebird/__init__.py index 95fa165ea4d..609896df20c 100644 --- a/lib/sqlalchemy/dialects/firebird/__init__.py +++ b/lib/sqlalchemy/dialects/firebird/__init__.py @@ -1,5 +1,5 @@ # dialects/firebird/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/firebird/base.py b/lib/sqlalchemy/dialects/firebird/base.py index 36129b88de6..61537242fb5 100644 --- a/lib/sqlalchemy/dialects/firebird/base.py +++ b/lib/sqlalchemy/dialects/firebird/base.py @@ -1,5 +1,5 @@ # dialects/firebird/base.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/firebird/fdb.py 
b/lib/sqlalchemy/dialects/firebird/fdb.py index a23c4fde09d..874983f09c6 100644 --- a/lib/sqlalchemy/dialects/firebird/fdb.py +++ b/lib/sqlalchemy/dialects/firebird/fdb.py @@ -1,5 +1,5 @@ # dialects/firebird/fdb.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/firebird/kinterbasdb.py b/lib/sqlalchemy/dialects/firebird/kinterbasdb.py index 5a572cec66c..f65aeb41a23 100644 --- a/lib/sqlalchemy/dialects/firebird/kinterbasdb.py +++ b/lib/sqlalchemy/dialects/firebird/kinterbasdb.py @@ -1,5 +1,5 @@ # dialects/firebird/kinterbasdb.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/__init__.py b/lib/sqlalchemy/dialects/mssql/__init__.py index c13f066cb84..0a2f557b08f 100644 --- a/lib/sqlalchemy/dialects/mssql/__init__.py +++ b/lib/sqlalchemy/dialects/mssql/__init__.py @@ -1,5 +1,5 @@ # dialects/mssql/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index 5bdbc2ecf31..1607a4d67d0 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -1,5 +1,5 @@ # dialects/mssql/base.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/information_schema.py b/lib/sqlalchemy/dialects/mssql/information_schema.py index 7f538d6b9a0..13e0a777361 100644 
--- a/lib/sqlalchemy/dialects/mssql/information_schema.py +++ b/lib/sqlalchemy/dialects/mssql/information_schema.py @@ -1,5 +1,5 @@ # dialects/mssql/information_schema.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/json.py b/lib/sqlalchemy/dialects/mssql/json.py index c857ea677ad..450bec29e8c 100644 --- a/lib/sqlalchemy/dialects/mssql/json.py +++ b/lib/sqlalchemy/dialects/mssql/json.py @@ -1,5 +1,5 @@ # dialects/mssql/json.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/mxodbc.py b/lib/sqlalchemy/dialects/mssql/mxodbc.py index f19c9f525f9..554992a0d61 100644 --- a/lib/sqlalchemy/dialects/mssql/mxodbc.py +++ b/lib/sqlalchemy/dialects/mssql/mxodbc.py @@ -1,5 +1,5 @@ # dialects/mssql/mxodbc.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/provision.py b/lib/sqlalchemy/dialects/mssql/provision.py index 20c39f0d11f..bc4c6cb841f 100644 --- a/lib/sqlalchemy/dialects/mssql/provision.py +++ b/lib/sqlalchemy/dialects/mssql/provision.py @@ -1,5 +1,5 @@ # dialects/mssql/provision.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/pymssql.py b/lib/sqlalchemy/dialects/mssql/pymssql.py index 052521fc3fe..49588bde167 100644 --- a/lib/sqlalchemy/dialects/mssql/pymssql.py +++ b/lib/sqlalchemy/dialects/mssql/pymssql.py @@ 
-1,5 +1,5 @@ # dialects/mssql/pymssql.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mssql/pyodbc.py b/lib/sqlalchemy/dialects/mssql/pyodbc.py index ec274090da2..82210d0f8dd 100644 --- a/lib/sqlalchemy/dialects/mssql/pyodbc.py +++ b/lib/sqlalchemy/dialects/mssql/pyodbc.py @@ -1,5 +1,5 @@ # dialects/mssql/pyodbc.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/__init__.py b/lib/sqlalchemy/dialects/mysql/__init__.py index 24bead67ec0..0ff338a3030 100644 --- a/lib/sqlalchemy/dialects/mysql/__init__.py +++ b/lib/sqlalchemy/dialects/mysql/__init__.py @@ -1,5 +1,5 @@ # dialects/mysql/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/aiomysql.py b/lib/sqlalchemy/dialects/mysql/aiomysql.py index 18dad8e53ce..5def0121e5a 100644 --- a/lib/sqlalchemy/dialects/mysql/aiomysql.py +++ b/lib/sqlalchemy/dialects/mysql/aiomysql.py @@ -1,5 +1,5 @@ # dialects/mysql/aiomysql.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/asyncmy.py b/lib/sqlalchemy/dialects/mysql/asyncmy.py index 2562795e8f7..6ea2a5a9bcd 100644 --- a/lib/sqlalchemy/dialects/mysql/asyncmy.py +++ b/lib/sqlalchemy/dialects/mysql/asyncmy.py @@ -1,5 +1,5 @@ # dialects/mysql/asyncmy.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under diff --git
a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index f9fc6cde47e..ef69e249796 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -1,5 +1,5 @@ # dialects/mysql/base.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/cymysql.py b/lib/sqlalchemy/dialects/mysql/cymysql.py index a1959b02385..cd1ed0d2064 100644 --- a/lib/sqlalchemy/dialects/mysql/cymysql.py +++ b/lib/sqlalchemy/dialects/mysql/cymysql.py @@ -1,5 +1,5 @@ # dialects/mysql/cymysql.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/dml.py b/lib/sqlalchemy/dialects/mysql/dml.py index 4c8b8eac1c3..d1a4e3137f7 100644 --- a/lib/sqlalchemy/dialects/mysql/dml.py +++ b/lib/sqlalchemy/dialects/mysql/dml.py @@ -1,5 +1,5 @@ # dialects/mysql/dml.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/enumerated.py b/lib/sqlalchemy/dialects/mysql/enumerated.py index a6b698781e4..adc95e102e0 100644 --- a/lib/sqlalchemy/dialects/mysql/enumerated.py +++ b/lib/sqlalchemy/dialects/mysql/enumerated.py @@ -1,5 +1,5 @@ # dialects/mysql/enumerated.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/expression.py b/lib/sqlalchemy/dialects/mysql/expression.py index 774a8cbaef3..e6a8af928f8 100644 --- 
a/lib/sqlalchemy/dialects/mysql/expression.py +++ b/lib/sqlalchemy/dialects/mysql/expression.py @@ -1,5 +1,5 @@ # dialects/mysql/expression.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/json.py b/lib/sqlalchemy/dialects/mysql/json.py index d0cde0bbc15..2a0d81468ce 100644 --- a/lib/sqlalchemy/dialects/mysql/json.py +++ b/lib/sqlalchemy/dialects/mysql/json.py @@ -1,5 +1,5 @@ # dialects/mysql/json.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/mariadb.py b/lib/sqlalchemy/dialects/mysql/mariadb.py index e64f554d2c6..741e119ce3f 100644 --- a/lib/sqlalchemy/dialects/mysql/mariadb.py +++ b/lib/sqlalchemy/dialects/mysql/mariadb.py @@ -1,5 +1,5 @@ # dialects/mysql/mariadb.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/mariadbconnector.py b/lib/sqlalchemy/dialects/mysql/mariadbconnector.py index 8e1fb39770e..65c5ca96eb1 100644 --- a/lib/sqlalchemy/dialects/mysql/mariadbconnector.py +++ b/lib/sqlalchemy/dialects/mysql/mariadbconnector.py @@ -1,5 +1,5 @@ # dialects/mysql/mariadbconnector.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/mysqlconnector.py b/lib/sqlalchemy/dialects/mysql/mysqlconnector.py index 59b96b045ae..89a11045c6b 100644 --- a/lib/sqlalchemy/dialects/mysql/mysqlconnector.py +++ 
b/lib/sqlalchemy/dialects/mysql/mysqlconnector.py @@ -1,5 +1,5 @@ # dialects/mysql/mysqlconnector.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/mysqldb.py b/lib/sqlalchemy/dialects/mysql/mysqldb.py index fc639647d9a..4457c6c242e 100644 --- a/lib/sqlalchemy/dialects/mysql/mysqldb.py +++ b/lib/sqlalchemy/dialects/mysql/mysqldb.py @@ -1,5 +1,5 @@ # dialects/mysql/mysqldb.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/oursql.py b/lib/sqlalchemy/dialects/mysql/oursql.py index ec34003c2b8..3ccfbad0ac5 100644 --- a/lib/sqlalchemy/dialects/mysql/oursql.py +++ b/lib/sqlalchemy/dialects/mysql/oursql.py @@ -1,5 +1,5 @@ # dialects/mysql/oursql.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/provision.py b/lib/sqlalchemy/dialects/mysql/provision.py index b8c6cd5d0e5..432bfbc91b6 100644 --- a/lib/sqlalchemy/dialects/mysql/provision.py +++ b/lib/sqlalchemy/dialects/mysql/provision.py @@ -1,5 +1,5 @@ # dialects/mysql/provision.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/pymysql.py b/lib/sqlalchemy/dialects/mysql/pymysql.py index 951e21dc056..7b4830932a5 100644 --- a/lib/sqlalchemy/dialects/mysql/pymysql.py +++ b/lib/sqlalchemy/dialects/mysql/pymysql.py @@ -1,5 +1,5 @@ # dialects/mysql/pymysql.py -# Copyright (C) 
2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/pyodbc.py b/lib/sqlalchemy/dialects/mysql/pyodbc.py index f09668bc3f8..a02d9b29a2f 100644 --- a/lib/sqlalchemy/dialects/mysql/pyodbc.py +++ b/lib/sqlalchemy/dialects/mysql/pyodbc.py @@ -1,5 +1,5 @@ # dialects/mysql/pyodbc.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/reflection.py b/lib/sqlalchemy/dialects/mysql/reflection.py index b8b21c1ba4a..078e3d5339b 100644 --- a/lib/sqlalchemy/dialects/mysql/reflection.py +++ b/lib/sqlalchemy/dialects/mysql/reflection.py @@ -1,5 +1,5 @@ # dialects/mysql/reflection.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/reserved_words.py b/lib/sqlalchemy/dialects/mysql/reserved_words.py index ecded855828..7055dd5c67e 100644 --- a/lib/sqlalchemy/dialects/mysql/reserved_words.py +++ b/lib/sqlalchemy/dialects/mysql/reserved_words.py @@ -1,5 +1,5 @@ # dialects/mysql/reserved_words.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/mysql/types.py b/lib/sqlalchemy/dialects/mysql/types.py index a7996189b76..a304f29b9ef 100644 --- a/lib/sqlalchemy/dialects/mysql/types.py +++ b/lib/sqlalchemy/dialects/mysql/types.py @@ -1,5 +1,5 @@ # dialects/mysql/types.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the 
SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/oracle/__init__.py b/lib/sqlalchemy/dialects/oracle/__init__.py index 6ffeb962546..9dfc3f017c4 100644 --- a/lib/sqlalchemy/dialects/oracle/__init__.py +++ b/lib/sqlalchemy/dialects/oracle/__init__.py @@ -1,5 +1,5 @@ # dialects/oracle/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py index e57a9770c22..45f0b62893b 100644 --- a/lib/sqlalchemy/dialects/oracle/base.py +++ b/lib/sqlalchemy/dialects/oracle/base.py @@ -1,5 +1,5 @@ # dialects/oracle/base.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/lib/sqlalchemy/dialects/oracle/cx_oracle.py index 9b0f464b2a9..c334f5042a0 100644 --- a/lib/sqlalchemy/dialects/oracle/cx_oracle.py +++ b/lib/sqlalchemy/dialects/oracle/cx_oracle.py @@ -1,5 +1,5 @@ # dialects/oracle/cx_oracle.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/oracle/provision.py b/lib/sqlalchemy/dialects/oracle/provision.py index d517abec101..58f5853df35 100644 --- a/lib/sqlalchemy/dialects/oracle/provision.py +++ b/lib/sqlalchemy/dialects/oracle/provision.py @@ -1,5 +1,5 @@ # dialects/oracle/provision.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released 
under diff --git a/lib/sqlalchemy/dialects/postgresql/__init__.py b/lib/sqlalchemy/dialects/postgresql/__init__.py index 470f6cadb0f..2227a8eb5fb 100644 --- a/lib/sqlalchemy/dialects/postgresql/__init__.py +++ b/lib/sqlalchemy/dialects/postgresql/__init__.py @@ -1,5 +1,5 @@ # dialects/postgresql/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/array.py b/lib/sqlalchemy/dialects/postgresql/array.py index a401d234b8e..fd719dbe739 100644 --- a/lib/sqlalchemy/dialects/postgresql/array.py +++ b/lib/sqlalchemy/dialects/postgresql/array.py @@ -1,5 +1,5 @@ # dialects/postgresql/array.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/asyncpg.py b/lib/sqlalchemy/dialects/postgresql/asyncpg.py index 968dfbd3be5..5c4f831048e 100644 --- a/lib/sqlalchemy/dialects/postgresql/asyncpg.py +++ b/lib/sqlalchemy/dialects/postgresql/asyncpg.py @@ -1,5 +1,5 @@ # dialects/postgresql/asyncpg.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index c63eb27a835..c3cedf6d605 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -1,5 +1,5 @@ # dialects/postgresql/base.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/dml.py b/lib/sqlalchemy/dialects/postgresql/dml.py index
e9802f11990..dbd9c28b113 100644 --- a/lib/sqlalchemy/dialects/postgresql/dml.py +++ b/lib/sqlalchemy/dialects/postgresql/dml.py @@ -1,5 +1,5 @@ # dialects/postgresql/dml.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/ext.py b/lib/sqlalchemy/dialects/postgresql/ext.py index 47ed99f9b87..a0fa2fcb854 100644 --- a/lib/sqlalchemy/dialects/postgresql/ext.py +++ b/lib/sqlalchemy/dialects/postgresql/ext.py @@ -1,5 +1,5 @@ # dialects/postgresql/ext.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/hstore.py b/lib/sqlalchemy/dialects/postgresql/hstore.py index 8d8de550a02..379f54f5554 100644 --- a/lib/sqlalchemy/dialects/postgresql/hstore.py +++ b/lib/sqlalchemy/dialects/postgresql/hstore.py @@ -1,5 +1,5 @@ # dialects/postgresql/hstore.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/json.py b/lib/sqlalchemy/dialects/postgresql/json.py index a72a6781d92..dbe92a4ae93 100644 --- a/lib/sqlalchemy/dialects/postgresql/json.py +++ b/lib/sqlalchemy/dialects/postgresql/json.py @@ -1,5 +1,5 @@ # dialects/postgresql/json.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/pg8000.py b/lib/sqlalchemy/dialects/postgresql/pg8000.py index 09b58f83533..186f0ecf02d 100644 --- 
a/lib/sqlalchemy/dialects/postgresql/pg8000.py +++ b/lib/sqlalchemy/dialects/postgresql/pg8000.py @@ -1,5 +1,5 @@ # dialects/postgresql/pg8000.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/provision.py b/lib/sqlalchemy/dialects/postgresql/provision.py index bc69c0f6197..0b315469c99 100644 --- a/lib/sqlalchemy/dialects/postgresql/provision.py +++ b/lib/sqlalchemy/dialects/postgresql/provision.py @@ -1,5 +1,5 @@ # dialects/postgresql/provision.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py index c2bd530ecd7..cd2b217eabb 100644 --- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py +++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py @@ -1,5 +1,5 @@ # dialects/postgresql/psycopg2.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py b/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py index 7483d3b5291..4bdb924cc07 100644 --- a/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py +++ b/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py @@ -1,5 +1,5 @@ # dialects/postgresql/psycopg2cffi.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/pygresql.py b/lib/sqlalchemy/dialects/postgresql/pygresql.py index d3b7df9688f..73eb8d0225e 100644 --- a/lib/sqlalchemy/dialects/postgresql/pygresql.py +++
b/lib/sqlalchemy/dialects/postgresql/pygresql.py @@ -1,5 +1,5 @@ # dialects/postgresql/pygresql.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/pypostgresql.py b/lib/sqlalchemy/dialects/postgresql/pypostgresql.py index f152b4a2489..d5e35695941 100644 --- a/lib/sqlalchemy/dialects/postgresql/pypostgresql.py +++ b/lib/sqlalchemy/dialects/postgresql/pypostgresql.py @@ -1,5 +1,5 @@ # dialects/postgresql/pypostgresql.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/postgresql/ranges.py b/lib/sqlalchemy/dialects/postgresql/ranges.py index 800ff274f89..cfd01790772 100644 --- a/lib/sqlalchemy/dialects/postgresql/ranges.py +++ b/lib/sqlalchemy/dialects/postgresql/ranges.py @@ -1,5 +1,5 @@ # dialects/postgresql/ranges.py -# Copyright (C) 2013-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2013-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/__init__.py b/lib/sqlalchemy/dialects/sqlite/__init__.py index 8bde524f1ea..83dd3378129 100644 --- a/lib/sqlalchemy/dialects/sqlite/__init__.py +++ b/lib/sqlalchemy/dialects/sqlite/__init__.py @@ -1,5 +1,5 @@ # dialects/sqlite/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/aiosqlite.py b/lib/sqlalchemy/dialects/sqlite/aiosqlite.py index e51ca9573d6..78304ed8195 100644 --- a/lib/sqlalchemy/dialects/sqlite/aiosqlite.py +++ 
b/lib/sqlalchemy/dialects/sqlite/aiosqlite.py @@ -1,5 +1,5 @@ # dialects/sqlite/aiosqlite.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index 0c8aa903117..21ee004997f 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -1,5 +1,5 @@ # dialects/sqlite/base.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/dml.py b/lib/sqlalchemy/dialects/sqlite/dml.py index 7263b6a75f5..f3fe7c19e38 100644 --- a/lib/sqlalchemy/dialects/sqlite/dml.py +++ b/lib/sqlalchemy/dialects/sqlite/dml.py @@ -1,5 +1,5 @@ # dialects/sqlite/dml.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/json.py b/lib/sqlalchemy/dialects/sqlite/json.py index 32008e94ab2..1dda17f63f8 100644 --- a/lib/sqlalchemy/dialects/sqlite/json.py +++ b/lib/sqlalchemy/dialects/sqlite/json.py @@ -1,5 +1,5 @@ # dialects/sqlite/json.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/provision.py b/lib/sqlalchemy/dialects/sqlite/provision.py index 764ffacd603..3faa2564460 100644 --- a/lib/sqlalchemy/dialects/sqlite/provision.py +++ b/lib/sqlalchemy/dialects/sqlite/provision.py @@ -1,5 +1,5 @@ # dialects/sqlite/provision.py -# Copyright (C) 2005-2024 the SQLAlchemy authors 
and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py b/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py index 1513356b942..333502b4353 100644 --- a/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py +++ b/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py @@ -1,5 +1,5 @@ # dialects/sqlite/pysqlcipher.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sqlite/pysqlite.py b/lib/sqlalchemy/dialects/sqlite/pysqlite.py index f3de9b1bcb8..9ca735dcef1 100644 --- a/lib/sqlalchemy/dialects/sqlite/pysqlite.py +++ b/lib/sqlalchemy/dialects/sqlite/pysqlite.py @@ -1,5 +1,5 @@ # dialects/sqlite/pysqlite.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sybase/__init__.py b/lib/sqlalchemy/dialects/sybase/__init__.py index f41fa0b65a9..98627d48e56 100644 --- a/lib/sqlalchemy/dialects/sybase/__init__.py +++ b/lib/sqlalchemy/dialects/sybase/__init__.py @@ -1,5 +1,5 @@ # dialects/sybase/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sybase/base.py b/lib/sqlalchemy/dialects/sybase/base.py index 4d94b3d4a53..bab2f407d64 100644 --- a/lib/sqlalchemy/dialects/sybase/base.py +++ b/lib/sqlalchemy/dialects/sybase/base.py @@ -1,5 +1,5 @@ # dialects/sybase/base.py -# Copyright (C) 2010-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2010-2025 the SQLAlchemy authors and contributors # # 
get_select_precolumns(), limit_clause() implementation # copyright (C) 2007 Fisch Asset Management diff --git a/lib/sqlalchemy/dialects/sybase/mxodbc.py b/lib/sqlalchemy/dialects/sybase/mxodbc.py index 19d0d464885..5dcf5c87f44 100644 --- a/lib/sqlalchemy/dialects/sybase/mxodbc.py +++ b/lib/sqlalchemy/dialects/sybase/mxodbc.py @@ -1,5 +1,5 @@ # dialects/sybase/mxodbc.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sybase/pyodbc.py b/lib/sqlalchemy/dialects/sybase/pyodbc.py index 295bac25557..4e1d2774b3e 100644 --- a/lib/sqlalchemy/dialects/sybase/pyodbc.py +++ b/lib/sqlalchemy/dialects/sybase/pyodbc.py @@ -1,5 +1,5 @@ # dialects/sybase/pyodbc.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/dialects/sybase/pysybase.py b/lib/sqlalchemy/dialects/sybase/pysybase.py index 140d68f4c89..ddcd2363176 100644 --- a/lib/sqlalchemy/dialects/sybase/pysybase.py +++ b/lib/sqlalchemy/dialects/sybase/pysybase.py @@ -1,5 +1,5 @@ # dialects/sybase/pysybase.py -# Copyright (C) 2010-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2010-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/__init__.py b/lib/sqlalchemy/engine/__init__.py index 6e92ba201d9..26750cd31de 100644 --- a/lib/sqlalchemy/engine/__init__.py +++ b/lib/sqlalchemy/engine/__init__.py @@ -1,5 +1,5 @@ # engine/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/base.py 
b/lib/sqlalchemy/engine/base.py index 68915259e8d..26dfa6d6fa0 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -1,5 +1,5 @@ # engine/base.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/characteristics.py b/lib/sqlalchemy/engine/characteristics.py index 5cd3daaa2e1..232cf3b5d41 100644 --- a/lib/sqlalchemy/engine/characteristics.py +++ b/lib/sqlalchemy/engine/characteristics.py @@ -1,5 +1,5 @@ # engine/characteristics.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/create.py b/lib/sqlalchemy/engine/create.py index 239bd486022..0acbb57eff5 100644 --- a/lib/sqlalchemy/engine/create.py +++ b/lib/sqlalchemy/engine/create.py @@ -1,5 +1,5 @@ # engine/create.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/cursor.py b/lib/sqlalchemy/engine/cursor.py index 9329ce00e9e..970dbb39bfe 100644 --- a/lib/sqlalchemy/engine/cursor.py +++ b/lib/sqlalchemy/engine/cursor.py @@ -1,5 +1,5 @@ # engine/cursor.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/default.py b/lib/sqlalchemy/engine/default.py index c93fd271405..90ca4c49566 100644 --- a/lib/sqlalchemy/engine/default.py +++ b/lib/sqlalchemy/engine/default.py @@ -1,5 +1,5 @@ # engine/default.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# 
Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/events.py b/lib/sqlalchemy/engine/events.py index 544e5f394c0..45f8e950339 100644 --- a/lib/sqlalchemy/engine/events.py +++ b/lib/sqlalchemy/engine/events.py @@ -1,5 +1,5 @@ # engine/events.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/interfaces.py b/lib/sqlalchemy/engine/interfaces.py index 0363412597a..350e952097c 100644 --- a/lib/sqlalchemy/engine/interfaces.py +++ b/lib/sqlalchemy/engine/interfaces.py @@ -1,5 +1,5 @@ # engine/interfaces.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/mock.py b/lib/sqlalchemy/engine/mock.py index 6c7c908b662..00818f22a84 100644 --- a/lib/sqlalchemy/engine/mock.py +++ b/lib/sqlalchemy/engine/mock.py @@ -1,5 +1,5 @@ # engine/mock.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/reflection.py b/lib/sqlalchemy/engine/reflection.py index 38768c9c0d5..0367320cc37 100644 --- a/lib/sqlalchemy/engine/reflection.py +++ b/lib/sqlalchemy/engine/reflection.py @@ -1,5 +1,5 @@ # engine/reflection.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/result.py b/lib/sqlalchemy/engine/result.py index 818e3068d28..7cdeb81942d 100644 --- 
a/lib/sqlalchemy/engine/result.py +++ b/lib/sqlalchemy/engine/result.py @@ -1,5 +1,5 @@ # engine/result.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/row.py b/lib/sqlalchemy/engine/row.py index cdc0c850642..fb24a463498 100644 --- a/lib/sqlalchemy/engine/row.py +++ b/lib/sqlalchemy/engine/row.py @@ -1,5 +1,5 @@ # engine/row.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/strategies.py b/lib/sqlalchemy/engine/strategies.py index 9c04483707e..728eb83da21 100644 --- a/lib/sqlalchemy/engine/strategies.py +++ b/lib/sqlalchemy/engine/strategies.py @@ -1,5 +1,5 @@ # engine/strategies.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/url.py b/lib/sqlalchemy/engine/url.py index 5b12e358bda..0582d22b0b8 100644 --- a/lib/sqlalchemy/engine/url.py +++ b/lib/sqlalchemy/engine/url.py @@ -1,5 +1,5 @@ # engine/url.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/engine/util.py b/lib/sqlalchemy/engine/util.py index f118f6641e2..1e4e24613c3 100644 --- a/lib/sqlalchemy/engine/util.py +++ b/lib/sqlalchemy/engine/util.py @@ -1,5 +1,5 @@ # engine/util.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git 
a/lib/sqlalchemy/event/__init__.py b/lib/sqlalchemy/event/__init__.py index ed5e121b607..3d06738db99 100644 --- a/lib/sqlalchemy/event/__init__.py +++ b/lib/sqlalchemy/event/__init__.py @@ -1,5 +1,5 @@ # event/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/api.py b/lib/sqlalchemy/event/api.py index 7855778654b..167e9e66da4 100644 --- a/lib/sqlalchemy/event/api.py +++ b/lib/sqlalchemy/event/api.py @@ -1,5 +1,5 @@ # event/api.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/attr.py b/lib/sqlalchemy/event/attr.py index b531808223c..fcbd5283786 100644 --- a/lib/sqlalchemy/event/attr.py +++ b/lib/sqlalchemy/event/attr.py @@ -1,5 +1,5 @@ # event/attr.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/base.py b/lib/sqlalchemy/event/base.py index d8282cebba1..76bb046827e 100644 --- a/lib/sqlalchemy/event/base.py +++ b/lib/sqlalchemy/event/base.py @@ -1,5 +1,5 @@ # event/base.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/legacy.py b/lib/sqlalchemy/event/legacy.py index 0416980ffc9..06a0ad77e86 100644 --- a/lib/sqlalchemy/event/legacy.py +++ b/lib/sqlalchemy/event/legacy.py @@ -1,5 +1,5 @@ # event/legacy.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # 
# # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/event/registry.py b/lib/sqlalchemy/event/registry.py index b306560dcd9..a27e345205d 100644 --- a/lib/sqlalchemy/event/registry.py +++ b/lib/sqlalchemy/event/registry.py @@ -1,5 +1,5 @@ # event/registry.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/events.py b/lib/sqlalchemy/events.py index aafbde6ba72..8ecd54e0918 100644 --- a/lib/sqlalchemy/events.py +++ b/lib/sqlalchemy/events.py @@ -1,5 +1,5 @@ # events.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/exc.py b/lib/sqlalchemy/exc.py index 5ad06faec6f..e3789afad76 100644 --- a/lib/sqlalchemy/exc.py +++ b/lib/sqlalchemy/exc.py @@ -1,5 +1,5 @@ # exc.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/__init__.py b/lib/sqlalchemy/ext/__init__.py index f03ed945f35..2751bcf938a 100644 --- a/lib/sqlalchemy/ext/__init__.py +++ b/lib/sqlalchemy/ext/__init__.py @@ -1,5 +1,5 @@ # ext/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/associationproxy.py b/lib/sqlalchemy/ext/associationproxy.py index 3b284624fab..d4ebf5250d7 100644 --- a/lib/sqlalchemy/ext/associationproxy.py +++ b/lib/sqlalchemy/ext/associationproxy.py @@ -1,5 +1,5 @@ # ext/associationproxy.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and 
contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/__init__.py b/lib/sqlalchemy/ext/asyncio/__init__.py index 2ff1c949b2b..08132be17e5 100644 --- a/lib/sqlalchemy/ext/asyncio/__init__.py +++ b/lib/sqlalchemy/ext/asyncio/__init__.py @@ -1,5 +1,5 @@ # ext/asyncio/__init__.py -# Copyright (C) 2020-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/base.py b/lib/sqlalchemy/ext/asyncio/base.py index 610cc1be787..2b9798de487 100644 --- a/lib/sqlalchemy/ext/asyncio/base.py +++ b/lib/sqlalchemy/ext/asyncio/base.py @@ -1,5 +1,5 @@ # ext/asyncio/base.py -# Copyright (C) 2020-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/engine.py b/lib/sqlalchemy/ext/asyncio/engine.py index a902d9dc3b4..0f3f299e5aa 100644 --- a/lib/sqlalchemy/ext/asyncio/engine.py +++ b/lib/sqlalchemy/ext/asyncio/engine.py @@ -1,5 +1,5 @@ # ext/asyncio/engine.py -# Copyright (C) 2020-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/events.py b/lib/sqlalchemy/ext/asyncio/events.py index 1b0e3fc5ad8..dcd3ee513ab 100644 --- a/lib/sqlalchemy/ext/asyncio/events.py +++ b/lib/sqlalchemy/ext/asyncio/events.py @@ -1,5 +1,5 @@ # ext/asyncio/events.py -# Copyright (C) 2020-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/exc.py 
b/lib/sqlalchemy/ext/asyncio/exc.py index 1cf6f363860..558187c0b41 100644 --- a/lib/sqlalchemy/ext/asyncio/exc.py +++ b/lib/sqlalchemy/ext/asyncio/exc.py @@ -1,5 +1,5 @@ # ext/asyncio/exc.py -# Copyright (C) 2020-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/result.py b/lib/sqlalchemy/ext/asyncio/result.py index ef8bd8cccd7..70d027f2502 100644 --- a/lib/sqlalchemy/ext/asyncio/result.py +++ b/lib/sqlalchemy/ext/asyncio/result.py @@ -1,5 +1,5 @@ # ext/asyncio/result.py -# Copyright (C) 2020-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/scoping.py b/lib/sqlalchemy/ext/asyncio/scoping.py index 6ecd5827e8b..d2df303868a 100644 --- a/lib/sqlalchemy/ext/asyncio/scoping.py +++ b/lib/sqlalchemy/ext/asyncio/scoping.py @@ -1,5 +1,5 @@ # ext/asyncio/scoping.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/asyncio/session.py b/lib/sqlalchemy/ext/asyncio/session.py index 6a12f9e5549..b1b63ca8d1c 100644 --- a/lib/sqlalchemy/ext/asyncio/session.py +++ b/lib/sqlalchemy/ext/asyncio/session.py @@ -1,5 +1,5 @@ # ext/asyncio/session.py -# Copyright (C) 2020-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2020-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/automap.py b/lib/sqlalchemy/ext/automap.py index 0c434dc266f..aae28eb5896 100644 --- a/lib/sqlalchemy/ext/automap.py +++ b/lib/sqlalchemy/ext/automap.py @@ -1,5 +1,5 @@ # ext/automap.py -# Copyright (C) 2005-2024 the 
SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/baked.py b/lib/sqlalchemy/ext/baked.py index 7d68c3b1802..0c9eb7b8c5b 100644 --- a/lib/sqlalchemy/ext/baked.py +++ b/lib/sqlalchemy/ext/baked.py @@ -1,5 +1,5 @@ # ext/baked.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/compiler.py b/lib/sqlalchemy/ext/compiler.py index 5b3b00ff258..ffa9f09db58 100644 --- a/lib/sqlalchemy/ext/compiler.py +++ b/lib/sqlalchemy/ext/compiler.py @@ -1,5 +1,5 @@ # ext/compiler.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/declarative/__init__.py b/lib/sqlalchemy/ext/declarative/__init__.py index f89c9219bcf..afbce73a494 100644 --- a/lib/sqlalchemy/ext/declarative/__init__.py +++ b/lib/sqlalchemy/ext/declarative/__init__.py @@ -1,5 +1,5 @@ # ext/declarative/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/declarative/extensions.py b/lib/sqlalchemy/ext/declarative/extensions.py index 125a335aeed..0da7cf3a6fb 100644 --- a/lib/sqlalchemy/ext/declarative/extensions.py +++ b/lib/sqlalchemy/ext/declarative/extensions.py @@ -1,5 +1,5 @@ # ext/declarative/extensions.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git 
a/lib/sqlalchemy/ext/horizontal_shard.py b/lib/sqlalchemy/ext/horizontal_shard.py index f66f78a9109..625e66be159 100644 --- a/lib/sqlalchemy/ext/horizontal_shard.py +++ b/lib/sqlalchemy/ext/horizontal_shard.py @@ -1,5 +1,5 @@ # ext/horizontal_shard.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/hybrid.py b/lib/sqlalchemy/ext/hybrid.py index 2e0cbb815dc..45407427781 100644 --- a/lib/sqlalchemy/ext/hybrid.py +++ b/lib/sqlalchemy/ext/hybrid.py @@ -1,5 +1,5 @@ # ext/hybrid.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/indexable.py b/lib/sqlalchemy/ext/indexable.py index 1906dc71ceb..d2ccafecedb 100644 --- a/lib/sqlalchemy/ext/indexable.py +++ b/lib/sqlalchemy/ext/indexable.py @@ -1,5 +1,5 @@ # ext/indexable.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/instrumentation.py b/lib/sqlalchemy/ext/instrumentation.py index bfca24f243f..56741a23352 100644 --- a/lib/sqlalchemy/ext/instrumentation.py +++ b/lib/sqlalchemy/ext/instrumentation.py @@ -1,5 +1,5 @@ # ext/instrumentation.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/mutable.py b/lib/sqlalchemy/ext/mutable.py index 968d48e1b81..ff4f82658a9 100644 --- a/lib/sqlalchemy/ext/mutable.py +++ b/lib/sqlalchemy/ext/mutable.py @@ -1,5 +1,5 @@ # ext/mutable.py -# Copyright (C) 2005-2024 the SQLAlchemy 
authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/mypy/__init__.py b/lib/sqlalchemy/ext/mypy/__init__.py index de2c02ee9f1..b5827cb8d36 100644 --- a/lib/sqlalchemy/ext/mypy/__init__.py +++ b/lib/sqlalchemy/ext/mypy/__init__.py @@ -1,5 +1,5 @@ # ext/mypy/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/mypy/apply.py b/lib/sqlalchemy/ext/mypy/apply.py index 8136737b91b..694c41c8bd6 100644 --- a/lib/sqlalchemy/ext/mypy/apply.py +++ b/lib/sqlalchemy/ext/mypy/apply.py @@ -1,5 +1,5 @@ # ext/mypy/apply.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/mypy/decl_class.py b/lib/sqlalchemy/ext/mypy/decl_class.py index b3820cae131..bd12c8d5ce5 100644 --- a/lib/sqlalchemy/ext/mypy/decl_class.py +++ b/lib/sqlalchemy/ext/mypy/decl_class.py @@ -1,5 +1,5 @@ # ext/mypy/decl_class.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/mypy/infer.py b/lib/sqlalchemy/ext/mypy/infer.py index 2543cd5704d..b68308d72e6 100644 --- a/lib/sqlalchemy/ext/mypy/infer.py +++ b/lib/sqlalchemy/ext/mypy/infer.py @@ -1,5 +1,5 @@ # ext/mypy/infer.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/mypy/names.py b/lib/sqlalchemy/ext/mypy/names.py index 
3db240cd809..9417a98cc7b 100644 --- a/lib/sqlalchemy/ext/mypy/names.py +++ b/lib/sqlalchemy/ext/mypy/names.py @@ -1,5 +1,5 @@ # ext/mypy/names.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/mypy/plugin.py b/lib/sqlalchemy/ext/mypy/plugin.py index 5e18aec695f..37379f7ef10 100644 --- a/lib/sqlalchemy/ext/mypy/plugin.py +++ b/lib/sqlalchemy/ext/mypy/plugin.py @@ -1,5 +1,5 @@ # ext/mypy/plugin.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/mypy/util.py b/lib/sqlalchemy/ext/mypy/util.py index 30df8332c54..956c1c9ea34 100644 --- a/lib/sqlalchemy/ext/mypy/util.py +++ b/lib/sqlalchemy/ext/mypy/util.py @@ -1,5 +1,5 @@ # ext/mypy/util.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/orderinglist.py b/lib/sqlalchemy/ext/orderinglist.py index 0af05f6de75..ae097f26fa7 100644 --- a/lib/sqlalchemy/ext/orderinglist.py +++ b/lib/sqlalchemy/ext/orderinglist.py @@ -1,5 +1,5 @@ # ext/orderinglist.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/ext/serializer.py b/lib/sqlalchemy/ext/serializer.py index 689acae7e2b..478dcd6967d 100644 --- a/lib/sqlalchemy/ext/serializer.py +++ b/lib/sqlalchemy/ext/serializer.py @@ -1,5 +1,5 @@ # ext/serializer.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and 
contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/future/__init__.py b/lib/sqlalchemy/future/__init__.py index 2a81152be5d..a5925383f7e 100644 --- a/lib/sqlalchemy/future/__init__.py +++ b/lib/sqlalchemy/future/__init__.py @@ -1,5 +1,5 @@ # future/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/future/engine.py b/lib/sqlalchemy/future/engine.py index 1aa2beb7b80..111f1cd71df 100644 --- a/lib/sqlalchemy/future/engine.py +++ b/lib/sqlalchemy/future/engine.py @@ -1,5 +1,5 @@ # future/engine.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/future/orm/__init__.py b/lib/sqlalchemy/future/orm/__init__.py index 501ff2ed2eb..65be463b8c7 100644 --- a/lib/sqlalchemy/future/orm/__init__.py +++ b/lib/sqlalchemy/future/orm/__init__.py @@ -1,5 +1,5 @@ # future/orm/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/inspection.py b/lib/sqlalchemy/inspection.py index 9e70cb8d271..17740956d5a 100644 --- a/lib/sqlalchemy/inspection.py +++ b/lib/sqlalchemy/inspection.py @@ -1,5 +1,5 @@ # inspection.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/log.py b/lib/sqlalchemy/log.py index 1183636dc8e..197a86de8cd 100644 --- a/lib/sqlalchemy/log.py +++ b/lib/sqlalchemy/log.py @@ -1,5 +1,5 @@ # log.py -# Copyright (C) 
2006-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2006-2025 the SQLAlchemy authors and contributors # # Includes alterations by Vinay Sajip vinay_sajip@yahoo.co.uk # diff --git a/lib/sqlalchemy/orm/__init__.py b/lib/sqlalchemy/orm/__init__.py index 85da4ea9681..9e32dd4c263 100644 --- a/lib/sqlalchemy/orm/__init__.py +++ b/lib/sqlalchemy/orm/__init__.py @@ -1,5 +1,5 @@ # orm/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/attributes.py b/lib/sqlalchemy/orm/attributes.py index be8cd41c3d3..98c0742442a 100644 --- a/lib/sqlalchemy/orm/attributes.py +++ b/lib/sqlalchemy/orm/attributes.py @@ -1,5 +1,5 @@ # orm/attributes.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/base.py b/lib/sqlalchemy/orm/base.py index 3be8916e0b1..bd8d150d86f 100644 --- a/lib/sqlalchemy/orm/base.py +++ b/lib/sqlalchemy/orm/base.py @@ -1,5 +1,5 @@ # orm/base.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/clsregistry.py b/lib/sqlalchemy/orm/clsregistry.py index 0a09ccd8852..69e62df70ba 100644 --- a/lib/sqlalchemy/orm/clsregistry.py +++ b/lib/sqlalchemy/orm/clsregistry.py @@ -1,5 +1,5 @@ # orm/clsregistry.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/collections.py b/lib/sqlalchemy/orm/collections.py index 5e7e3586983..857bdae182e 100644 --- 
a/lib/sqlalchemy/orm/collections.py +++ b/lib/sqlalchemy/orm/collections.py @@ -1,5 +1,5 @@ # orm/collections.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/context.py b/lib/sqlalchemy/orm/context.py index 1f663c157ff..82515285dbd 100644 --- a/lib/sqlalchemy/orm/context.py +++ b/lib/sqlalchemy/orm/context.py @@ -1,5 +1,5 @@ # orm/context.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/decl_api.py b/lib/sqlalchemy/orm/decl_api.py index 25d015aa20f..a2e8bbb86d5 100644 --- a/lib/sqlalchemy/orm/decl_api.py +++ b/lib/sqlalchemy/orm/decl_api.py @@ -1,5 +1,5 @@ # orm/decl_api.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/decl_base.py b/lib/sqlalchemy/orm/decl_base.py index fe2131690af..71dbd722f7f 100644 --- a/lib/sqlalchemy/orm/decl_base.py +++ b/lib/sqlalchemy/orm/decl_base.py @@ -1,5 +1,5 @@ # orm/decl_base.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/dependency.py b/lib/sqlalchemy/orm/dependency.py index 702a27ac8b4..7aa812980c4 100644 --- a/lib/sqlalchemy/orm/dependency.py +++ b/lib/sqlalchemy/orm/dependency.py @@ -1,5 +1,5 @@ # orm/dependency.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff 
--git a/lib/sqlalchemy/orm/descriptor_props.py b/lib/sqlalchemy/orm/descriptor_props.py index 5045015ad32..fcfe5c9e89d 100644 --- a/lib/sqlalchemy/orm/descriptor_props.py +++ b/lib/sqlalchemy/orm/descriptor_props.py @@ -1,5 +1,5 @@ # orm/descriptor_props.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/dynamic.py b/lib/sqlalchemy/orm/dynamic.py index 0a0d17c08d8..a50558d21f3 100644 --- a/lib/sqlalchemy/orm/dynamic.py +++ b/lib/sqlalchemy/orm/dynamic.py @@ -1,5 +1,5 @@ # orm/dynamic.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/evaluator.py b/lib/sqlalchemy/orm/evaluator.py index 9fa2d4818c1..dc5dd1310c3 100644 --- a/lib/sqlalchemy/orm/evaluator.py +++ b/lib/sqlalchemy/orm/evaluator.py @@ -1,5 +1,5 @@ # orm/evaluator.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/events.py b/lib/sqlalchemy/orm/events.py index 6e777f08e8d..0341d08d091 100644 --- a/lib/sqlalchemy/orm/events.py +++ b/lib/sqlalchemy/orm/events.py @@ -1,5 +1,5 @@ # orm/events.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/exc.py b/lib/sqlalchemy/orm/exc.py index 9ef29a45361..b1ce8edba53 100644 --- a/lib/sqlalchemy/orm/exc.py +++ b/lib/sqlalchemy/orm/exc.py @@ -1,5 +1,5 @@ # orm/exc.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the 
SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/identity.py b/lib/sqlalchemy/orm/identity.py index 419e8471e37..65798aec573 100644 --- a/lib/sqlalchemy/orm/identity.py +++ b/lib/sqlalchemy/orm/identity.py @@ -1,5 +1,5 @@ # orm/identity.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/instrumentation.py b/lib/sqlalchemy/orm/instrumentation.py index d8d7e1c914c..5c2345cab44 100644 --- a/lib/sqlalchemy/orm/instrumentation.py +++ b/lib/sqlalchemy/orm/instrumentation.py @@ -1,5 +1,5 @@ # orm/instrumentation.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/interfaces.py b/lib/sqlalchemy/orm/interfaces.py index 5237c04b680..895d932132c 100644 --- a/lib/sqlalchemy/orm/interfaces.py +++ b/lib/sqlalchemy/orm/interfaces.py @@ -1,5 +1,5 @@ # orm/interfaces.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/loading.py b/lib/sqlalchemy/orm/loading.py index 3cec7a01245..dc700421fed 100644 --- a/lib/sqlalchemy/orm/loading.py +++ b/lib/sqlalchemy/orm/loading.py @@ -1,5 +1,5 @@ # orm/loading.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py index 4da1a5673a4..ffd131a886b 100644 --- a/lib/sqlalchemy/orm/mapper.py +++ b/lib/sqlalchemy/orm/mapper.py @@ 
-1,5 +1,5 @@ # orm/mapper.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/path_registry.py b/lib/sqlalchemy/orm/path_registry.py index ab14c403c3d..a2391474470 100644 --- a/lib/sqlalchemy/orm/path_registry.py +++ b/lib/sqlalchemy/orm/path_registry.py @@ -1,5 +1,5 @@ # orm/path_registry.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py index a582ca1d76f..1e7b2b1cc0a 100644 --- a/lib/sqlalchemy/orm/persistence.py +++ b/lib/sqlalchemy/orm/persistence.py @@ -1,5 +1,5 @@ # orm/persistence.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/properties.py b/lib/sqlalchemy/orm/properties.py index 287bfdc1ded..e701dea7d67 100644 --- a/lib/sqlalchemy/orm/properties.py +++ b/lib/sqlalchemy/orm/properties.py @@ -1,5 +1,5 @@ # orm/properties.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index 8ebc0216dba..9c0a2c17445 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -1,5 +1,5 @@ # orm/query.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/relationships.py 
b/lib/sqlalchemy/orm/relationships.py index 2824e00e07c..a9a30a5ffea 100644 --- a/lib/sqlalchemy/orm/relationships.py +++ b/lib/sqlalchemy/orm/relationships.py @@ -1,5 +1,5 @@ # orm/relationships.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/scoping.py b/lib/sqlalchemy/orm/scoping.py index ccdb6503428..5be2a82cf5a 100644 --- a/lib/sqlalchemy/orm/scoping.py +++ b/lib/sqlalchemy/orm/scoping.py @@ -1,5 +1,5 @@ # orm/scoping.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index 3c1e5b4477d..7caf8bca4cd 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -1,5 +1,5 @@ # orm/session.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/state.py b/lib/sqlalchemy/orm/state.py index 65daed3b1de..6175dc69e7b 100644 --- a/lib/sqlalchemy/orm/state.py +++ b/lib/sqlalchemy/orm/state.py @@ -1,5 +1,5 @@ # orm/state.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index dd9f8b87ae7..a560899c9b3 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -1,5 +1,5 @@ # orm/strategies.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # 
This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/strategy_options.py b/lib/sqlalchemy/orm/strategy_options.py index ed3c897b373..8157c5b61e5 100644 --- a/lib/sqlalchemy/orm/strategy_options.py +++ b/lib/sqlalchemy/orm/strategy_options.py @@ -1,5 +1,5 @@ # orm/strategy_options.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/sync.py b/lib/sqlalchemy/orm/sync.py index 96bcd7262fe..c6f2ab99278 100644 --- a/lib/sqlalchemy/orm/sync.py +++ b/lib/sqlalchemy/orm/sync.py @@ -1,5 +1,5 @@ # orm/sync.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/unitofwork.py b/lib/sqlalchemy/orm/unitofwork.py index 9353282df5d..efb4ed54bc8 100644 --- a/lib/sqlalchemy/orm/unitofwork.py +++ b/lib/sqlalchemy/orm/unitofwork.py @@ -1,5 +1,5 @@ # orm/unitofwork.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py index 28bf5b76c8d..99a07190b5c 100644 --- a/lib/sqlalchemy/orm/util.py +++ b/lib/sqlalchemy/orm/util.py @@ -1,5 +1,5 @@ # orm/util.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/pool/__init__.py b/lib/sqlalchemy/pool/__init__.py index b3368b493b0..7ad79ee8a32 100644 --- a/lib/sqlalchemy/pool/__init__.py +++ b/lib/sqlalchemy/pool/__init__.py @@ -1,5 +1,5 @@ # pool/__init__.py -# Copyright (C) 2005-2024 
the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/pool/base.py b/lib/sqlalchemy/pool/base.py index f739e9d99c5..5004bdda1c7 100644 --- a/lib/sqlalchemy/pool/base.py +++ b/lib/sqlalchemy/pool/base.py @@ -1,5 +1,5 @@ # pool/base.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/pool/dbapi_proxy.py b/lib/sqlalchemy/pool/dbapi_proxy.py index 7acd6afbaed..930c242514a 100644 --- a/lib/sqlalchemy/pool/dbapi_proxy.py +++ b/lib/sqlalchemy/pool/dbapi_proxy.py @@ -1,5 +1,5 @@ # pool/dbapi_proxy.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/pool/events.py b/lib/sqlalchemy/pool/events.py index cdbfa5dc123..a91c126ffa5 100644 --- a/lib/sqlalchemy/pool/events.py +++ b/lib/sqlalchemy/pool/events.py @@ -1,5 +1,5 @@ # pool/events.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/pool/impl.py b/lib/sqlalchemy/pool/impl.py index e08d66404a7..6bc4ad80474 100644 --- a/lib/sqlalchemy/pool/impl.py +++ b/lib/sqlalchemy/pool/impl.py @@ -1,5 +1,5 @@ # pool/impl.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/processors.py b/lib/sqlalchemy/processors.py index 3efc24d577c..92a7da64890 100644 --- a/lib/sqlalchemy/processors.py +++ 
b/lib/sqlalchemy/processors.py @@ -1,5 +1,5 @@ # processors.py -# Copyright (C) 2010-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2010-2025 the SQLAlchemy authors and contributors # # Copyright (C) 2010 Gaetan de Menten gdementen@gmail.com # diff --git a/lib/sqlalchemy/schema.py b/lib/sqlalchemy/schema.py index dbea1b76615..37949c7a4de 100644 --- a/lib/sqlalchemy/schema.py +++ b/lib/sqlalchemy/schema.py @@ -1,5 +1,5 @@ # schema.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/__init__.py b/lib/sqlalchemy/sql/__init__.py index 94f6d8d2387..0c8ae3307c5 100644 --- a/lib/sqlalchemy/sql/__init__.py +++ b/lib/sqlalchemy/sql/__init__.py @@ -1,5 +1,5 @@ # sql/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/annotation.py b/lib/sqlalchemy/sql/annotation.py index e9a8ffe8373..60455956865 100644 --- a/lib/sqlalchemy/sql/annotation.py +++ b/lib/sqlalchemy/sql/annotation.py @@ -1,5 +1,5 @@ # sql/annotation.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/base.py b/lib/sqlalchemy/sql/base.py index c44c693c79b..12049c08eaf 100644 --- a/lib/sqlalchemy/sql/base.py +++ b/lib/sqlalchemy/sql/base.py @@ -1,5 +1,5 @@ # sql/base.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/coercions.py b/lib/sqlalchemy/sql/coercions.py index 
6a86c24c6e5..d6fd8ba6b4a 100644 --- a/lib/sqlalchemy/sql/coercions.py +++ b/lib/sqlalchemy/sql/coercions.py @@ -1,5 +1,5 @@ # sql/coercions.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index b975c1cd7f9..ca9ca962869 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -1,5 +1,5 @@ # sql/compiler.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/crud.py b/lib/sqlalchemy/sql/crud.py index 2d0ceb0c182..49bac18121f 100644 --- a/lib/sqlalchemy/sql/crud.py +++ b/lib/sqlalchemy/sql/crud.py @@ -1,5 +1,5 @@ # sql/crud.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/ddl.py b/lib/sqlalchemy/sql/ddl.py index 343d8f046d4..2c88dc67d53 100644 --- a/lib/sqlalchemy/sql/ddl.py +++ b/lib/sqlalchemy/sql/ddl.py @@ -1,5 +1,5 @@ # sql/ddl.py -# Copyright (C) 2009-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2009-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/default_comparator.py b/lib/sqlalchemy/sql/default_comparator.py index 257039459a6..e09c53b636b 100644 --- a/lib/sqlalchemy/sql/default_comparator.py +++ b/lib/sqlalchemy/sql/default_comparator.py @@ -1,5 +1,5 @@ # sql/default_comparator.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy 
and is released under diff --git a/lib/sqlalchemy/sql/dml.py b/lib/sqlalchemy/sql/dml.py index eb314dcbf0c..d25e3f85c62 100644 --- a/lib/sqlalchemy/sql/dml.py +++ b/lib/sqlalchemy/sql/dml.py @@ -1,5 +1,5 @@ # sql/dml.py -# Copyright (C) 2009-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2009-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py index 96f2936fe78..6c9bbbaccfc 100644 --- a/lib/sqlalchemy/sql/elements.py +++ b/lib/sqlalchemy/sql/elements.py @@ -1,5 +1,5 @@ # sql/elements.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/events.py b/lib/sqlalchemy/sql/events.py index d650a962933..63327814c90 100644 --- a/lib/sqlalchemy/sql/events.py +++ b/lib/sqlalchemy/sql/events.py @@ -1,5 +1,5 @@ # sql/events.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/expression.py b/lib/sqlalchemy/sql/expression.py index 009f061f633..787b698d1b7 100644 --- a/lib/sqlalchemy/sql/expression.py +++ b/lib/sqlalchemy/sql/expression.py @@ -1,5 +1,5 @@ # sql/expression.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/functions.py b/lib/sqlalchemy/sql/functions.py index cd22a131eb5..b44d5de3079 100644 --- a/lib/sqlalchemy/sql/functions.py +++ b/lib/sqlalchemy/sql/functions.py @@ -1,5 +1,5 @@ # sql/functions.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the 
SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/lambdas.py b/lib/sqlalchemy/sql/lambdas.py index 0c3f24df804..446ceb09ff2 100644 --- a/lib/sqlalchemy/sql/lambdas.py +++ b/lib/sqlalchemy/sql/lambdas.py @@ -1,5 +1,5 @@ # sql/lambdas.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/naming.py b/lib/sqlalchemy/sql/naming.py index 5b2c49e378e..6b890303081 100644 --- a/lib/sqlalchemy/sql/naming.py +++ b/lib/sqlalchemy/sql/naming.py @@ -1,5 +1,5 @@ # sql/naming.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/operators.py b/lib/sqlalchemy/sql/operators.py index 82c8881018e..189b1e8dce1 100644 --- a/lib/sqlalchemy/sql/operators.py +++ b/lib/sqlalchemy/sql/operators.py @@ -1,5 +1,5 @@ # sql/operators.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/roles.py b/lib/sqlalchemy/sql/roles.py index 51b69f9b024..1f952b641c1 100644 --- a/lib/sqlalchemy/sql/roles.py +++ b/lib/sqlalchemy/sql/roles.py @@ -1,5 +1,5 @@ # sql/roles.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py index d1451666b70..a8812376c0c 100644 --- a/lib/sqlalchemy/sql/schema.py +++ b/lib/sqlalchemy/sql/schema.py @@ -1,5 +1,5 @@ # sql/schema.py -# Copyright (C) 2005-2024 the 
SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py index cbec34d727e..f302822a5ae 100644 --- a/lib/sqlalchemy/sql/selectable.py +++ b/lib/sqlalchemy/sql/selectable.py @@ -1,5 +1,5 @@ # sql/selectable.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py index 94dfd84c781..5dc901bd219 100644 --- a/lib/sqlalchemy/sql/sqltypes.py +++ b/lib/sqlalchemy/sql/sqltypes.py @@ -1,5 +1,5 @@ # sql/sqltypes.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/traversals.py b/lib/sqlalchemy/sql/traversals.py index eb4913d7c33..b705c6aa191 100644 --- a/lib/sqlalchemy/sql/traversals.py +++ b/lib/sqlalchemy/sql/traversals.py @@ -1,5 +1,5 @@ # sql/traversals.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/type_api.py b/lib/sqlalchemy/sql/type_api.py index badadcec60b..8d9d2f254d9 100644 --- a/lib/sqlalchemy/sql/type_api.py +++ b/lib/sqlalchemy/sql/type_api.py @@ -1,5 +1,5 @@ # sql/type_api.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/util.py b/lib/sqlalchemy/sql/util.py index f4361c3ecdd..2b6de8fd1e8 100644 --- 
a/lib/sqlalchemy/sql/util.py +++ b/lib/sqlalchemy/sql/util.py @@ -1,5 +1,5 @@ # sql/util.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/sql/visitors.py b/lib/sqlalchemy/sql/visitors.py index a3b2a69127b..cd73d369ab3 100644 --- a/lib/sqlalchemy/sql/visitors.py +++ b/lib/sqlalchemy/sql/visitors.py @@ -1,5 +1,5 @@ # sql/visitors.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/__init__.py b/lib/sqlalchemy/testing/__init__.py index db6abbd4f7b..62ca95a5f9e 100644 --- a/lib/sqlalchemy/testing/__init__.py +++ b/lib/sqlalchemy/testing/__init__.py @@ -1,5 +1,5 @@ # testing/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/assertions.py b/lib/sqlalchemy/testing/assertions.py index 6825eda204a..c08dba41f30 100644 --- a/lib/sqlalchemy/testing/assertions.py +++ b/lib/sqlalchemy/testing/assertions.py @@ -1,5 +1,5 @@ # testing/assertions.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/assertsql.py b/lib/sqlalchemy/testing/assertsql.py index dc5523cb87a..39eeddda7cc 100644 --- a/lib/sqlalchemy/testing/assertsql.py +++ b/lib/sqlalchemy/testing/assertsql.py @@ -1,5 +1,5 @@ # testing/assertsql.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is 
part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/asyncio.py b/lib/sqlalchemy/testing/asyncio.py index 63b2a10da6d..5f15162002c 100644 --- a/lib/sqlalchemy/testing/asyncio.py +++ b/lib/sqlalchemy/testing/asyncio.py @@ -1,5 +1,5 @@ # testing/asyncio.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/config.py b/lib/sqlalchemy/testing/config.py index 45c789cb250..ae3061c0d87 100644 --- a/lib/sqlalchemy/testing/config.py +++ b/lib/sqlalchemy/testing/config.py @@ -1,5 +1,5 @@ # testing/config.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/engines.py b/lib/sqlalchemy/testing/engines.py index 64843f4e163..8cad9eda32f 100644 --- a/lib/sqlalchemy/testing/engines.py +++ b/lib/sqlalchemy/testing/engines.py @@ -1,5 +1,5 @@ # testing/engines.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/entities.py b/lib/sqlalchemy/testing/entities.py index 24b9067db50..6cec155cf23 100644 --- a/lib/sqlalchemy/testing/entities.py +++ b/lib/sqlalchemy/testing/entities.py @@ -1,5 +1,5 @@ # testing/entities.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/exclusions.py b/lib/sqlalchemy/testing/exclusions.py index f61b4b0ca47..1aff19c0ea2 100644 --- a/lib/sqlalchemy/testing/exclusions.py +++ b/lib/sqlalchemy/testing/exclusions.py @@ -1,5 
+1,5 @@ # testing/exclusions.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/fixtures.py b/lib/sqlalchemy/testing/fixtures.py index ff650c47d12..0ba9343b580 100644 --- a/lib/sqlalchemy/testing/fixtures.py +++ b/lib/sqlalchemy/testing/fixtures.py @@ -1,5 +1,5 @@ # testing/fixtures.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/mock.py b/lib/sqlalchemy/testing/mock.py index 38f90dd4d79..d164c5856f4 100644 --- a/lib/sqlalchemy/testing/mock.py +++ b/lib/sqlalchemy/testing/mock.py @@ -1,5 +1,5 @@ # testing/mock.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/pickleable.py b/lib/sqlalchemy/testing/pickleable.py index 79f0ee90802..e2227e61828 100644 --- a/lib/sqlalchemy/testing/pickleable.py +++ b/lib/sqlalchemy/testing/pickleable.py @@ -1,5 +1,5 @@ # testing/pickleable.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/plugin/__init__.py b/lib/sqlalchemy/testing/plugin/__init__.py index 0f987773195..ce960be967d 100644 --- a/lib/sqlalchemy/testing/plugin/__init__.py +++ b/lib/sqlalchemy/testing/plugin/__init__.py @@ -1,5 +1,5 @@ # testing/plugin/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is 
released under diff --git a/lib/sqlalchemy/testing/plugin/bootstrap.py b/lib/sqlalchemy/testing/plugin/bootstrap.py index cc50912c8cb..e9cdff4bc04 100644 --- a/lib/sqlalchemy/testing/plugin/bootstrap.py +++ b/lib/sqlalchemy/testing/plugin/bootstrap.py @@ -1,5 +1,5 @@ # testing/plugin/bootstrap.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/plugin/plugin_base.py b/lib/sqlalchemy/testing/plugin/plugin_base.py index 693dfd4f24d..d78c2a76ff9 100644 --- a/lib/sqlalchemy/testing/plugin/plugin_base.py +++ b/lib/sqlalchemy/testing/plugin/plugin_base.py @@ -1,5 +1,5 @@ # testing/plugin/plugin_base.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/plugin/pytestplugin.py b/lib/sqlalchemy/testing/plugin/pytestplugin.py index 38b1b8cf3f7..2be6e6cda5a 100644 --- a/lib/sqlalchemy/testing/plugin/pytestplugin.py +++ b/lib/sqlalchemy/testing/plugin/pytestplugin.py @@ -1,5 +1,5 @@ # testing/plugin/pytestplugin.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/plugin/reinvent_fixtures_py2k.py b/lib/sqlalchemy/testing/plugin/reinvent_fixtures_py2k.py index 12ed987433d..a765090135a 100644 --- a/lib/sqlalchemy/testing/plugin/reinvent_fixtures_py2k.py +++ b/lib/sqlalchemy/testing/plugin/reinvent_fixtures_py2k.py @@ -1,5 +1,5 @@ # testing/plugin/reinvent_fixtures_py2k.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy 
and is released under diff --git a/lib/sqlalchemy/testing/profiling.py b/lib/sqlalchemy/testing/profiling.py index 7796e1334ab..a116730ec98 100644 --- a/lib/sqlalchemy/testing/profiling.py +++ b/lib/sqlalchemy/testing/profiling.py @@ -1,5 +1,5 @@ # testing/profiling.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/provision.py b/lib/sqlalchemy/testing/provision.py index eea9c66dbca..370ee12c7eb 100644 --- a/lib/sqlalchemy/testing/provision.py +++ b/lib/sqlalchemy/testing/provision.py @@ -1,5 +1,5 @@ # testing/provision.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/requirements.py b/lib/sqlalchemy/testing/requirements.py index e1b2d609526..9164faa93e5 100644 --- a/lib/sqlalchemy/testing/requirements.py +++ b/lib/sqlalchemy/testing/requirements.py @@ -1,5 +1,5 @@ # testing/requirements.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/schema.py b/lib/sqlalchemy/testing/schema.py index e816e817280..1281a27e589 100644 --- a/lib/sqlalchemy/testing/schema.py +++ b/lib/sqlalchemy/testing/schema.py @@ -1,5 +1,5 @@ # testing/schema.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/suite/__init__.py b/lib/sqlalchemy/testing/suite/__init__.py index a146cb3163c..8435aa004f3 100644 --- a/lib/sqlalchemy/testing/suite/__init__.py +++ 
b/lib/sqlalchemy/testing/suite/__init__.py @@ -1,5 +1,5 @@ # testing/suite/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/suite/test_cte.py b/lib/sqlalchemy/testing/suite/test_cte.py index 9a1a2cf75a3..56180ca8d44 100644 --- a/lib/sqlalchemy/testing/suite/test_cte.py +++ b/lib/sqlalchemy/testing/suite/test_cte.py @@ -1,5 +1,5 @@ # testing/suite/test_cte.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/suite/test_ddl.py b/lib/sqlalchemy/testing/suite/test_ddl.py index e09064cccde..ee8b802ac74 100644 --- a/lib/sqlalchemy/testing/suite/test_ddl.py +++ b/lib/sqlalchemy/testing/suite/test_ddl.py @@ -1,5 +1,5 @@ # testing/suite/test_ddl.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/suite/test_deprecations.py b/lib/sqlalchemy/testing/suite/test_deprecations.py index 676d2d1edf3..95eed768c32 100644 --- a/lib/sqlalchemy/testing/suite/test_deprecations.py +++ b/lib/sqlalchemy/testing/suite/test_deprecations.py @@ -1,5 +1,5 @@ # testing/suite/test_deprecations.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/suite/test_dialect.py b/lib/sqlalchemy/testing/suite/test_dialect.py index 625d29c39c0..6f32fee86ed 100644 --- a/lib/sqlalchemy/testing/suite/test_dialect.py +++ b/lib/sqlalchemy/testing/suite/test_dialect.py @@ -1,6 +1,6 @@ #! 
coding: utf-8 # testing/suite/test_dialect.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/suite/test_insert.py b/lib/sqlalchemy/testing/suite/test_insert.py index 2068e83a2ba..ebfdc13d915 100644 --- a/lib/sqlalchemy/testing/suite/test_insert.py +++ b/lib/sqlalchemy/testing/suite/test_insert.py @@ -1,5 +1,5 @@ # testing/suite/test_insert.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/suite/test_reflection.py b/lib/sqlalchemy/testing/suite/test_reflection.py index dbd6d1a6331..80ce8d69dd0 100644 --- a/lib/sqlalchemy/testing/suite/test_reflection.py +++ b/lib/sqlalchemy/testing/suite/test_reflection.py @@ -1,5 +1,5 @@ # testing/suite/test_reflection.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/suite/test_results.py b/lib/sqlalchemy/testing/suite/test_results.py index ede30c6f8b1..26c55297500 100644 --- a/lib/sqlalchemy/testing/suite/test_results.py +++ b/lib/sqlalchemy/testing/suite/test_results.py @@ -1,5 +1,5 @@ # testing/suite/test_results.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/suite/test_rowcount.py b/lib/sqlalchemy/testing/suite/test_rowcount.py index ca6995dbe46..346829c1d59 100644 --- a/lib/sqlalchemy/testing/suite/test_rowcount.py +++ b/lib/sqlalchemy/testing/suite/test_rowcount.py @@ -1,5 +1,5 @@ # 
testing/suite/test_rowcount.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/suite/test_select.py b/lib/sqlalchemy/testing/suite/test_select.py index eca2203d58f..5731207ec75 100644 --- a/lib/sqlalchemy/testing/suite/test_select.py +++ b/lib/sqlalchemy/testing/suite/test_select.py @@ -1,5 +1,5 @@ # testing/suite/test_select.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/suite/test_sequence.py b/lib/sqlalchemy/testing/suite/test_sequence.py index 596dee803e9..8d22d425b61 100644 --- a/lib/sqlalchemy/testing/suite/test_sequence.py +++ b/lib/sqlalchemy/testing/suite/test_sequence.py @@ -1,5 +1,5 @@ # testing/suite/test_sequence.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/suite/test_types.py b/lib/sqlalchemy/testing/suite/test_types.py index 31a63ef3c22..046ada282cf 100644 --- a/lib/sqlalchemy/testing/suite/test_types.py +++ b/lib/sqlalchemy/testing/suite/test_types.py @@ -1,5 +1,5 @@ # testing/suite/test_types.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/suite/test_unicode_ddl.py b/lib/sqlalchemy/testing/suite/test_unicode_ddl.py index 6740772e339..abe19d435bc 100644 --- a/lib/sqlalchemy/testing/suite/test_unicode_ddl.py +++ b/lib/sqlalchemy/testing/suite/test_unicode_ddl.py @@ -1,5 +1,5 @@ # testing/suite/test_unicode_ddl.py -# 
Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/suite/test_update_delete.py b/lib/sqlalchemy/testing/suite/test_update_delete.py index 1604fcd2d14..90ba651a164 100644 --- a/lib/sqlalchemy/testing/suite/test_update_delete.py +++ b/lib/sqlalchemy/testing/suite/test_update_delete.py @@ -1,5 +1,5 @@ # testing/suite/test_update_delete.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/util.py b/lib/sqlalchemy/testing/util.py index 8bfe5477e46..9bf71eb34a7 100644 --- a/lib/sqlalchemy/testing/util.py +++ b/lib/sqlalchemy/testing/util.py @@ -1,5 +1,5 @@ # testing/util.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/testing/warnings.py b/lib/sqlalchemy/testing/warnings.py index c4c3ecf00f4..5537bd4f9fc 100644 --- a/lib/sqlalchemy/testing/warnings.py +++ b/lib/sqlalchemy/testing/warnings.py @@ -1,5 +1,5 @@ # testing/warnings.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/types.py b/lib/sqlalchemy/types.py index 096b3576966..6182a01c141 100644 --- a/lib/sqlalchemy/types.py +++ b/lib/sqlalchemy/types.py @@ -1,5 +1,5 @@ # types.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/__init__.py 
b/lib/sqlalchemy/util/__init__.py index 289e191cf83..078723c048a 100644 --- a/lib/sqlalchemy/util/__init__.py +++ b/lib/sqlalchemy/util/__init__.py @@ -1,5 +1,5 @@ # util/__init__.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/_collections.py b/lib/sqlalchemy/util/_collections.py index 6c2a5aef38a..d5ac2a64252 100644 --- a/lib/sqlalchemy/util/_collections.py +++ b/lib/sqlalchemy/util/_collections.py @@ -1,5 +1,5 @@ # util/_collections.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/_compat_py3k.py b/lib/sqlalchemy/util/_compat_py3k.py index 8317112d944..6c4e37c6a6c 100644 --- a/lib/sqlalchemy/util/_compat_py3k.py +++ b/lib/sqlalchemy/util/_compat_py3k.py @@ -1,5 +1,5 @@ # util/_compat_py3k.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/_concurrency_py3k.py b/lib/sqlalchemy/util/_concurrency_py3k.py index 1e4ffefa401..141193ef06e 100644 --- a/lib/sqlalchemy/util/_concurrency_py3k.py +++ b/lib/sqlalchemy/util/_concurrency_py3k.py @@ -1,5 +1,5 @@ # util/_concurrency_py3k.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/_preloaded.py b/lib/sqlalchemy/util/_preloaded.py index 9a962adcb91..22f1379242a 100644 --- a/lib/sqlalchemy/util/_preloaded.py +++ b/lib/sqlalchemy/util/_preloaded.py @@ -1,5 +1,5 @@ # util/_preloaded.py -# Copyright (C) 
2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/compat.py b/lib/sqlalchemy/util/compat.py index d70cc82ffd7..bdb4a97854b 100644 --- a/lib/sqlalchemy/util/compat.py +++ b/lib/sqlalchemy/util/compat.py @@ -1,5 +1,5 @@ # util/compat.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/concurrency.py b/lib/sqlalchemy/util/concurrency.py index 546d82a836a..7341dbe685c 100644 --- a/lib/sqlalchemy/util/concurrency.py +++ b/lib/sqlalchemy/util/concurrency.py @@ -1,5 +1,5 @@ # util/concurrency.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/deprecations.py b/lib/sqlalchemy/util/deprecations.py index 0a3266eed69..bf537ba9be0 100644 --- a/lib/sqlalchemy/util/deprecations.py +++ b/lib/sqlalchemy/util/deprecations.py @@ -1,5 +1,5 @@ # util/deprecations.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/langhelpers.py b/lib/sqlalchemy/util/langhelpers.py index f3e960c1b7c..5d6e89257c4 100644 --- a/lib/sqlalchemy/util/langhelpers.py +++ b/lib/sqlalchemy/util/langhelpers.py @@ -1,5 +1,5 @@ # util/langhelpers.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/queue.py b/lib/sqlalchemy/util/queue.py index 
c6f66f2473a..70dc387a990 100644 --- a/lib/sqlalchemy/util/queue.py +++ b/lib/sqlalchemy/util/queue.py @@ -1,5 +1,5 @@ # util/queue.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/tool_support.py b/lib/sqlalchemy/util/tool_support.py index a203a2ab75a..407c2d45075 100644 --- a/lib/sqlalchemy/util/tool_support.py +++ b/lib/sqlalchemy/util/tool_support.py @@ -1,5 +1,5 @@ # util/tool_support.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/lib/sqlalchemy/util/topological.py b/lib/sqlalchemy/util/topological.py index b6bfc7415e3..27ee27bfc3a 100644 --- a/lib/sqlalchemy/util/topological.py +++ b/lib/sqlalchemy/util/topological.py @@ -1,5 +1,5 @@ # util/topological.py -# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors +# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors # # # This module is part of SQLAlchemy and is released under diff --git a/setup.cfg b/setup.cfg index 3f8003a1ed3..b4dd728ead1 100644 --- a/setup.cfg +++ b/setup.cfg @@ -139,6 +139,7 @@ ignore = A003, A004, A005, A006 D, E203,E305,E711,E712,E721,E722,E741, + FA100, N801,N802,N806, RST304,RST303,RST299,RST399, W503,W504 From 00728b53753bf14febe10587b9acc36c7eab99d8 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Fri, 3 Jan 2025 20:56:37 +0100 Subject: [PATCH 627/632] avoid using ubuntu 24 since it's currently having issues Change-Id: I53ca096258988e5a7e76c46c5a5881c662a58d39 --- .github/workflows/create-wheels.yaml | 20 ++++++++++---------- .github/workflows/run-on-pr.yaml | 10 +++++----- .github/workflows/run-test.yaml | 28 ++++++++++++++-------------- 3 files changed, 29 insertions(+), 29 deletions(-) diff --git 
a/.github/workflows/create-wheels.yaml b/.github/workflows/create-wheels.yaml index 0e8d5b1a102..68ece92b131 100644 --- a/.github/workflows/create-wheels.yaml +++ b/.github/workflows/create-wheels.yaml @@ -121,7 +121,7 @@ jobs: strategy: matrix: os: - - "ubuntu-latest" + - "ubuntu-22.04" - "ubuntu-20.04" python-version: # the versions are - as specified in PEP 425. @@ -144,14 +144,14 @@ jobs: extra-requires: "mock" exclude: - # ubuntu-latest does not have: py27, py36 - - os: "ubuntu-latest" + # ubuntu-22.04 does not have: py27, py36 + - os: "ubuntu-22.04" python-version: cp27-cp27m - - os: "ubuntu-latest" + - os: "ubuntu-22.04" python-version: cp27-cp27mu - - os: "ubuntu-latest" + - os: "ubuntu-22.04" python-version: cp36-cp36m - # ubuntu-20.04 does not need to test what ubuntu-latest supports + # ubuntu-20.04 does not need to test what ubuntu-22.04 supports - os: "ubuntu-20.04" python-version: cp37-cp37m - os: "ubuntu-20.04" @@ -312,7 +312,7 @@ jobs: strategy: matrix: os: - - "ubuntu-latest" + - "ubuntu-22.04" - "ubuntu-20.04" python-version: # the versions are - as specified in PEP 425. @@ -324,10 +324,10 @@ jobs: - cp311-cp311 - cp312-cp312 exclude: - # ubuntu-latest does not have: py27, py36 - - os: "ubuntu-latest" + # ubuntu-22.04 does not have: py27, py36 + - os: "ubuntu-22.04" python-version: cp36-cp36m - # ubuntu-20.04 does not need to test what ubuntu-latest supports + # ubuntu-20.04 does not need to test what ubuntu-22.04 supports - os: "ubuntu-20.04" python-version: cp37-cp37m - os: "ubuntu-20.04" diff --git a/.github/workflows/run-on-pr.yaml b/.github/workflows/run-on-pr.yaml index ab51b357b44..7634e68d105 100644 --- a/.github/workflows/run-on-pr.yaml +++ b/.github/workflows/run-on-pr.yaml @@ -23,7 +23,7 @@ jobs: # run this job using this matrix, excluding some combinations below. 
matrix: os: - - "ubuntu-latest" + - "ubuntu-22.04" python-version: - "3.11" build-type: @@ -62,7 +62,7 @@ jobs: # run this job using this matrix, excluding some combinations below. matrix: os: - - "ubuntu-latest" + - "ubuntu-22.04" python-version: - "3.11" @@ -95,7 +95,7 @@ jobs: # run this job using this matrix, excluding some combinations below. matrix: os: - - "ubuntu-latest" + - "ubuntu-22.04" python-version: - "3.11" @@ -123,8 +123,8 @@ jobs: # Arm emulation is quite slow (~20min) so for now just run it when merging to main # run-test-arm64: - # name: ${{ matrix.python-version }}-${{ matrix.build-type }}-arm64-ubuntu-latest - # runs-on: ubuntu-latest + # name: ${{ matrix.python-version }}-${{ matrix.build-type }}-arm64-ubuntu-22.04 + # runs-on: ubuntu-22.04 # strategy: # matrix: # python-version: diff --git a/.github/workflows/run-test.yaml b/.github/workflows/run-test.yaml index 8ad1d1deec4..ad4f9f40c25 100644 --- a/.github/workflows/run-test.yaml +++ b/.github/workflows/run-test.yaml @@ -28,7 +28,7 @@ jobs: matrix: os: - "ubuntu-20.04" - - "ubuntu-latest" + - "ubuntu-22.04" - "windows-latest" - "macos-13" python-version: @@ -54,21 +54,21 @@ jobs: # - python-version: "pypy-3.9" # pytest-args: "-k 'not test_autocommit_on and not test_turn_autocommit_off_via_default_iso_level and not test_autocommit_isolation_level'" # add aiosqlite on linux - - os: "ubuntu-latest" + - os: "ubuntu-22.04" pytest-args: "--dbdriver pysqlite --dbdriver aiosqlite" exclude: # linux and osx do not have x86 python - - os: "ubuntu-latest" + - os: "ubuntu-22.04" architecture: x86 - os: "ubuntu-20.04" architecture: x86 - os: "macos-13" architecture: x86 - # ubuntu-latest does not have: py27, py36 - - os: "ubuntu-latest" + # ubuntu-22.04 does not have: py27, py36 + - os: "ubuntu-22.04" python-version: "3.6" - # ubuntu-20.04 does not need to test what ubuntu-latest supports + # ubuntu-20.04 does not need to test what ubuntu-22.04 supports - os: "ubuntu-20.04" python-version: "3.7" - os: 
"ubuntu-20.04" @@ -116,7 +116,7 @@ jobs: strategy: matrix: os: - - "ubuntu-latest" + - "ubuntu-22.04" python-version: - cp27-cp27m - cp27-cp27mu @@ -150,7 +150,7 @@ jobs: strategy: matrix: os: - - "ubuntu-latest" + - "ubuntu-22.04" python-version: - cp36-cp36m - cp37-cp37m @@ -163,10 +163,10 @@ jobs: - "cext" - "nocext" exclude: - # ubuntu-latest does not have: py27, py36 - - os: "ubuntu-latest" + # ubuntu-22.04 does not have: py27, py36 + - os: "ubuntu-22.04" python-version: cp36-cp36m - # ubuntu-20.04 does not need to test what ubuntu-latest supports + # ubuntu-20.04 does not need to test what ubuntu-22.04 supports - os: "ubuntu-20.04" python-version: cp37-cp37m - os: "ubuntu-20.04" @@ -208,7 +208,7 @@ jobs: # run this job using this matrix, excluding some combinations below. matrix: os: - - "ubuntu-latest" + - "ubuntu-22.04" python-version: - "3.7" - "3.8" @@ -217,7 +217,7 @@ jobs: - "3.11" - "3.12" include: - # ubuntu-latest does not have: py27, py36 + # ubuntu-22.04 does not have: py27, py36 - os: "ubuntu-20.04" python-version: "3.6" fail-fast: false @@ -248,7 +248,7 @@ jobs: strategy: matrix: os: - - "ubuntu-latest" + - "ubuntu-22.04" python-version: - "3.11" From 7e7a62aa296e1fbadcb34f562ec6a498a35c46c9 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Sat, 4 Jan 2025 18:28:26 +0100 Subject: [PATCH 628/632] fix failing tests in github actions Change-Id: Ia37a7bcaafcc5fa8e1fe4783706d1185351cd14e --- .github/workflows/run-test.yaml | 5 +---- test/dialect/test_sqlite.py | 1 + 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/run-test.yaml b/.github/workflows/run-test.yaml index ad4f9f40c25..0693a53e3e5 100644 --- a/.github/workflows/run-test.yaml +++ b/.github/workflows/run-test.yaml @@ -210,16 +210,13 @@ jobs: os: - "ubuntu-22.04" python-version: + # ubuntu-22.04 does not have: py27, py36. 
Mypy no longer supports it - "3.7" - "3.8" - "3.9" - "3.10" - "3.11" - "3.12" - include: - # ubuntu-22.04 does not have: py27, py36 - - os: "ubuntu-20.04" - python-version: "3.6" fail-fast: false # steps to run in each job. Some are github actions, others run shell commands diff --git a/test/dialect/test_sqlite.py b/test/dialect/test_sqlite.py index 12e607020e0..55833761e9d 100644 --- a/test/dialect/test_sqlite.py +++ b/test/dialect/test_sqlite.py @@ -3546,6 +3546,7 @@ def test_on_conflict_do_update_special_types_in_set(self, connection): class ComputedReflectionTest(fixtures.TestBase): __only_on__ = "sqlite" + __requires__ = ("computed_columns",) __backend__ = True @classmethod From c575a5440d05e0761333b3bca9f8c7130322b2e9 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 13 Jan 2025 08:14:14 -0500 Subject: [PATCH 629/632] update logging_name doc engine logging has not used hex-strings in a very long time Change-Id: I77131e3eb2f72129fde1d9ab6dd4b4e40bc313c6 (cherry picked from commit 214088c42ad61794e315174c41ee92a3c408ae6c) --- doc/build/core/engines.rst | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/doc/build/core/engines.rst b/doc/build/core/engines.rst index d52ad6b13c1..42c21d12430 100644 --- a/doc/build/core/engines.rst +++ b/doc/build/core/engines.rst @@ -641,12 +641,16 @@ getting duplicate log lines. Setting the Logging Name ------------------------- -The logger name of instance such as an :class:`~sqlalchemy.engine.Engine` or -:class:`~sqlalchemy.pool.Pool` defaults to using a truncated hex identifier -string. To set this to a specific name, use the +The logger name for :class:`~sqlalchemy.engine.Engine` or +:class:`~sqlalchemy.pool.Pool` is set to be the module-qualified class name of the +object. 
This name can be further qualified with an additional name +using the :paramref:`_sa.create_engine.logging_name` and -:paramref:`_sa.create_engine.pool_logging_name` with -:func:`sqlalchemy.create_engine`:: +:paramref:`_sa.create_engine.pool_logging_name` parameters with +:func:`sqlalchemy.create_engine`; the name will be appended to existing +class-qualified logging name. This use is recommended for applications that +make use of multiple global :class:`.Engine` instances simultaneously, so +that they may be distinguished in logging:: >>> from sqlalchemy import create_engine >>> from sqlalchemy import text From 73345dd6b4a94636ddebe4cb633ab6671da50000 Mon Sep 17 00:00:00 2001 From: Federico Caselli Date: Wed, 26 Mar 2025 21:43:10 +0100 Subject: [PATCH 630/632] document sqlite truncate_microseconds in DATETIME and TIME Change-Id: I93412d951b466343f2cf9b6d513ad46d17f5d8ee (cherry picked from commit a9b37199133eea81ebdf062439352ef2745d3c00) --- lib/sqlalchemy/dialects/sqlite/base.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index 21ee004997f..94715747059 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -937,6 +937,10 @@ class DATETIME(_DateTimeMixin, sqltypes.DateTime): regexp=r"(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)" ) + :param truncate_microseconds: when ``True`` microseconds will be truncated + from the datetime. Can't be specified together with ``storage_format`` + or ``regexp``. + :param storage_format: format string which will be applied to the dict with keys year, month, day, hour, minute, second, and microsecond. @@ -1103,6 +1107,10 @@ class TIME(_DateTimeMixin, sqltypes.Time): regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?") ) + :param truncate_microseconds: when ``True`` microseconds will be truncated + from the time. Can't be specified together with ``storage_format`` or ``regexp``. 
+ :param storage_format: format string which will be applied to the dict with keys hour, minute, second, and microsecond. From 9d4b9adba0811d607e9f2fa8657648e96475ee57 Mon Sep 17 00:00:00 2001 From: Matt John Date: Tue, 15 Apr 2025 20:05:36 +0100 Subject: [PATCH 631/632] chore: Fix typo of psycopg2 in comment (#12526) This is the first example in the documentation of a particular connector, which might result in copy+pastes, resulting in an error (cherry picked from commit f2a9ecde29bb9d5daadd0626054ff8b54865c781) Change-Id: I493f7d4ac780ac61c6ae17695de71bab19e67a46 --- lib/sqlalchemy/dialects/postgresql/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index c3cedf6d605..0e98a41bc60 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -265,7 +265,7 @@ def use_identity(element, compiler, **kw): from sqlalchemy import event postgresql_engine = create_engine( - "postgresql+pyscopg2://scott:tiger@hostname/dbname", + "postgresql+psycopg2://scott:tiger@hostname/dbname", # disable default reset-on-return scheme pool_reset_on_return=None, From 0f2f87a3e9669eb90c825c86e6f36c7bc870d2c2 Mon Sep 17 00:00:00 2001 From: krave1986 Date: Tue, 6 May 2025 03:38:19 +0800 Subject: [PATCH 632/632] Fix issues in versioning.rst (#12567) (cherry picked from commit e1f2f204c1b2967486d160b19a8ddf21c0b698bf) --- doc/build/orm/versioning.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/build/orm/versioning.rst b/doc/build/orm/versioning.rst index 790c1c1f92e..8cb827f8105 100644 --- a/doc/build/orm/versioning.rst +++ b/doc/build/orm/versioning.rst @@ -230,14 +230,14 @@ at our choosing:: __mapper_args__ = {"version_id_col": version_uuid, "version_id_generator": False} - u1 = User(name="u1", version_uuid=uuid.uuid4()) + u1 = User(name="u1", version_uuid=uuid.uuid4().hex) session.add(u1) 
session.commit() u1.name = "u2" - u1.version_uuid = uuid.uuid4() + u1.version_uuid = uuid.uuid4().hex session.commit()